Merge "dwc3-msm: Log link TRB address and also readback ring doorbell register"
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 069e8d5..cadb7a9 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -357,6 +357,7 @@
 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
 		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 		/sys/devices/system/cpu/vulnerabilities/l1tf
+		/sys/devices/system/cpu/vulnerabilities/mds
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities
@@ -369,8 +370,7 @@
 		"Vulnerable"	  CPU is affected and no mitigation in effect
 		"Mitigation: $M"  CPU is affected and mitigation $M is in effect
 
-		Details about the l1tf file can be found in
-		Documentation/admin-guide/l1tf.rst
+		See also: Documentation/hw-vuln/index.rst
 
 What:		/sys/devices/system/cpu/smt
 		/sys/devices/system/cpu/smt/active
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index e09db01..e916c1e 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -86,6 +86,13 @@
 		The unit size is one block, now only support configuring in range
 		of [1, 512].
 
+What:		/sys/fs/f2fs/<disk>/umount_discard_timeout
+Date:		January 2019
+Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+		Set timeout to issue discard commands during umount.
+		Default: 5 secs
+
 What:		/sys/fs/f2fs/<disk>/max_victim_search
 Date:		January 2014
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt
new file mode 100644
index 0000000..4fb40fe
--- /dev/null
+++ b/Documentation/accounting/psi.txt
@@ -0,0 +1,180 @@
+================================
+PSI - Pressure Stall Information
+================================
+
+:Date: April, 2018
+:Author: Johannes Weiner <hannes@cmpxchg.org>
+
+When CPU, memory or IO devices are contended, workloads experience
+latency spikes, throughput losses, and run the risk of OOM kills.
+
+Without an accurate measure of such contention, users are forced to
+either play it safe and under-utilize their hardware resources, or
+roll the dice and frequently suffer the disruptions resulting from
+excessive overcommit.
+
+The psi feature identifies and quantifies the disruptions caused by
+such resource crunches and the time impact they have on complex
+workloads or even entire systems.
+
+Having an accurate measure of productivity losses caused by resource
+scarcity aids users in sizing workloads to hardware--or provisioning
+hardware according to workload demand.
+
+As psi aggregates this information in realtime, systems can be managed
+dynamically using techniques such as load shedding, migrating jobs to
+other systems or data centers, or strategically pausing or killing low
+priority or restartable batch jobs.
+
+This allows maximizing hardware utilization without sacrificing
+workload health or risking major disruptions such as OOM kills.
+
+Pressure interface
+==================
+
+Pressure information for each resource is exported through the
+respective file in /proc/pressure/ -- cpu, memory, and io.
+
+The format for CPU is as follows:
+
+some avg10=0.00 avg60=0.00 avg300=0.00 total=0
+
+and for memory and IO:
+
+some avg10=0.00 avg60=0.00 avg300=0.00 total=0
+full avg10=0.00 avg60=0.00 avg300=0.00 total=0
+
+The "some" line indicates the share of time in which at least some
+tasks are stalled on a given resource.
+
+The "full" line indicates the share of time in which all non-idle
+tasks are stalled on a given resource simultaneously. In this state
+actual CPU cycles are going to waste, and a workload that spends
+extended time in this state is considered to be thrashing. This has
+severe impact on performance, and it's useful to distinguish this
+situation from a state where some tasks are stalled but the CPU is
+still doing productive work. As such, time spent in this subset of the
+stall state is tracked separately and exported in the "full" averages.
+
+The ratios are tracked as recent trends over ten, sixty, and three
+hundred second windows, which gives insight into short term events as
+well as medium and long term trends. The total absolute stall time is
+tracked and exported as well, to allow detection of latency spikes
+which wouldn't necessarily make a dent in the time averages, or to
+average trends over custom time frames.
+
+Monitoring for pressure thresholds
+==================================
+
+Users can register triggers and use poll() to be woken up when resource
+pressure exceeds certain thresholds.
+
+A trigger describes the maximum cumulative stall time over a specific
+time window, e.g. 100ms of total stall time within any 500ms window to
+generate a wakeup event.
+
+To register a trigger, the user has to open the psi interface file
+under /proc/pressure/ representing the resource to be monitored and
+write the desired threshold and time window. The open file descriptor
+should be used to wait for trigger events using select(), poll() or epoll().
+The following format is used:
+
+<some|full> <stall amount in us> <time window in us>
+
+For example, writing "some 150000 1000000" into /proc/pressure/memory
+would add a 150ms threshold for partial memory stall measured within a
+1sec time window. Writing "full 50000 1000000" into /proc/pressure/io
+would add a 50ms threshold for full io stall measured within a 1sec
+time window.
+
+Triggers can be set on more than one psi metric, and more than one
+trigger for the same psi metric can be specified. However, each trigger
+needs a separate file descriptor to be polled independently of the
+others, so a separate open() syscall should be made for each trigger,
+even when opening the same psi interface file.
+
+Monitors activate only when the system enters a stall state for the
+monitored psi metric and deactivate upon exit from the stall state.
+While the system is in the stall state, psi signal growth is monitored
+at a rate of 10 times per tracking window.
+
+The kernel accepts window sizes ranging from 500ms to 10s, so the
+minimum monitoring update interval is 50ms and the maximum is 1s. The
+lower limit prevents overly frequent polling. The upper limit is chosen
+as a high enough value after which monitors are most likely not needed
+and psi averages can be used instead.
+
+When activated, a psi monitor stays active for at least the duration of
+one tracking window to avoid repeated activations/deactivations when the
+system is bouncing in and out of the stall state.
+
+Notifications to the userspace are rate-limited to one per tracking window.
+
+The trigger will de-register when the file descriptor used to define the
+trigger is closed.
+
+Userspace monitor usage example
+===============================
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <poll.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * Monitor memory partial stall with 1s tracking window size
+ * and 150ms threshold.
+ */
+int main() {
+	const char trig[] = "some 150000 1000000";
+	struct pollfd fds;
+	int n;
+
+	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
+	if (fds.fd < 0) {
+		printf("/proc/pressure/memory open error: %s\n",
+			strerror(errno));
+		return 1;
+	}
+	fds.events = POLLPRI;
+
+	if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
+		printf("/proc/pressure/memory write error: %s\n",
+			strerror(errno));
+		return 1;
+	}
+
+	printf("waiting for events...\n");
+	while (1) {
+		n = poll(&fds, 1, -1);
+		if (n < 0) {
+			printf("poll error: %s\n", strerror(errno));
+			return 1;
+		}
+		if (fds.revents & POLLERR) {
+			printf("got POLLERR, event source is gone\n");
+			return 0;
+		}
+		if (fds.revents & POLLPRI) {
+			printf("event triggered!\n");
+		} else {
+			printf("unknown event received: 0x%x\n", fds.revents);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+Cgroup2 interface
+=================
+
+In a system with a CONFIG_CGROUPS=y kernel and the cgroup2 filesystem
+mounted, pressure stall information is also tracked for tasks grouped
+into cgroups. Each subdirectory in the cgroupfs mountpoint contains
+cpu.pressure, memory.pressure, and io.pressure files; the format is
+the same as the /proc/pressure/ files.
+
+Per-cgroup psi monitors can be specified and used the same way as
+system-wide ones.
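+
+As a minimal sketch (the cgroup name "workload" below is a hypothetical
+example, not part of the interface), registering a per-cgroup trigger
+differs from the system-wide example above only in the path:
+
+	int cg_fd = open("/sys/fs/cgroup/workload/memory.pressure",
+			 O_RDWR | O_NONBLOCK);
+	/* write "some 150000 1000000" into cg_fd and poll() it for
+	   POLLPRI events, exactly as in the example above */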
diff --git a/Documentation/arm/kernel_mode_neon.txt b/Documentation/arm/kernel_mode_neon.txt
index 5254527..b9e060c 100644
--- a/Documentation/arm/kernel_mode_neon.txt
+++ b/Documentation/arm/kernel_mode_neon.txt
@@ -6,7 +6,7 @@
 * Use only NEON instructions, or VFP instructions that don't rely on support
   code
 * Isolate your NEON code in a separate compilation unit, and compile it with
-  '-mfpu=neon -mfloat-abi=softfp'
+  '-march=armv7-a -mfpu=neon -mfloat-abi=softfp'
 * Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your
   NEON code
 * Don't sleep in your NEON code, and be aware that it will be executed with
@@ -87,7 +87,7 @@
 Therefore, the recommended and only supported way of using NEON/VFP in the
 kernel is by adhering to the following rules:
 * isolate the NEON code in a separate compilation unit and compile it with
-  '-mfpu=neon -mfloat-abi=softfp';
+  '-march=armv7-a -mfpu=neon -mfloat-abi=softfp';
 * issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls
   into the unit containing the NEON code from a compilation unit which is *not*
   built with the GCC flag '-mfpu=neon' set.
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index 4cc07ce..73950fd 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -717,6 +717,12 @@
 	$PERIOD duration.  If only one number is written, $MAX is
 	updated.
 
+  cpu.pressure
+	A read-only nested-key file which exists on non-root cgroups.
+
+	Shows pressure stall information for CPU. See
+	Documentation/accounting/psi.txt for details.
+
 
 5-2. Memory
 
@@ -925,6 +931,12 @@
 	Swap usage hard limit.  If a cgroup's swap usage reaches this
 	limit, anonymous meomry of the cgroup will not be swapped out.
 
+  memory.pressure
+	A read-only nested-key file which exists on non-root cgroups.
+
+	Shows pressure stall information for memory. See
+	Documentation/accounting/psi.txt for details.
+
 
 5-2-2. Usage Guidelines
 
@@ -1055,6 +1067,12 @@
 
 	  8:16 rbps=2097152 wbps=max riops=max wiops=max
 
+  io.pressure
+	A read-only nested-key file which exists on non-root cgroups.
+
+	Shows pressure stall information for IO. See
+	Documentation/accounting/psi.txt for details.
+
 
 5-3-2. Writeback
 
diff --git a/Documentation/dev-tools/gcov.rst b/Documentation/dev-tools/gcov.rst
index 19eedfe..adaae05 100644
--- a/Documentation/dev-tools/gcov.rst
+++ b/Documentation/dev-tools/gcov.rst
@@ -34,10 +34,6 @@
         CONFIG_DEBUG_FS=y
         CONFIG_GCOV_KERNEL=y
 
-select the gcc's gcov format, default is autodetect based on gcc version::
-
-        CONFIG_GCOV_FORMAT_AUTODETECT=y
-
 and to get coverage data for the entire kernel::
 
         CONFIG_GCOV_PROFILE_ALL=y
@@ -169,6 +165,20 @@
       [user@build] gcov -o /tmp/coverage/tmp/out/init main.c
 
 
+Note on compilers
+-----------------
+
+GCC and LLVM gcov tools are not necessarily compatible. Use gcov_ to work with
+GCC-generated .gcno and .gcda files, and use llvm-cov_ for Clang.
+
+.. _gcov: http://gcc.gnu.org/onlinedocs/gcc/Gcov.html
+.. _llvm-cov: https://llvm.org/docs/CommandGuide/llvm-cov.html
+
+Build differences between GCC and Clang gcov are handled by Kconfig. It
+automatically selects the appropriate gcov format depending on the detected
+toolchain.
+
+
 Troubleshooting
 ---------------
 
diff --git a/Documentation/device-mapper/dm-bow.txt b/Documentation/device-mapper/dm-bow.txt
new file mode 100644
index 0000000..e3fc4d2
--- /dev/null
+++ b/Documentation/device-mapper/dm-bow.txt
@@ -0,0 +1,99 @@
+dm_bow (backup on write)
+========================
+
+dm_bow is a device mapper driver that uses the free space on a device to back up
+data that is overwritten. The changes can then be committed by a simple state
+change, or rolled back by removing the dm_bow device and running a command line
+utility over the underlying device.
+
+dm_bow has three states, set by writing '1' or '2' to /sys/block/dm-?/bow/state.
+It is only possible to go from state 0 (initial state) to state 1, and then from
+state 1 to state 2.
+
+State 0: dm_bow collects all trims to the device and assumes that these mark
+free space on the overlying file system that can be safely used. Typically the
+mount code would create the dm_bow device, mount the file system, call the
+FITRIM ioctl on the file system, then switch to state 1. These trims are not
+propagated to the underlying device.
+
+State 1: All writes to the device cause the underlying data to be backed up to
+the free (trimmed) area as needed, in such a way that they can be restored.
+However, the writes, with one exception, then happen exactly as they would
+without dm_bow, so the device is always in a good final state. The exception is
+that sector 0 is used to keep a log of the latest changes, both to indicate that
+we are in this state and to allow rollback. See below for all details. If there
+isn't enough free space, writes are failed with -ENOSPC.
+
+State 2: The transition to state 2 triggers replacing the special sector 0 with
+the normal sector 0, and the freeing of all state information. dm_bow then
+becomes a pass-through driver, allowing the device to continue to be used with
+minimal performance impact.
+
+Usage
+=====
+dm-bow takes one command line parameter, the name of the underlying device.
+
+dm-bow will typically be used in the following way. dm-bow will be loaded with a
+suitable underlying device and the resultant device will be mounted. A file
+system trim will be issued via the FITRIM ioctl, then the device will be
+switched to state 1. The file system will now be used as normal. At some point,
+the changes can either be committed by switching to state 2, or rolled back by
+unmounting the file system, removing the dm-bow device and running the command
+line utility. Note that rebooting the device will be equivalent to unmounting
+and removing, but the command line utility must still be run.
+
+Details of operation in state 1
+===============================
+
+dm_bow maintains a type for all sectors. A sector can be any of:
+
+SECTOR0
+SECTOR0_CURRENT
+UNCHANGED
+FREE
+CHANGED
+BACKUP
+
+SECTOR0 is the first sector on the device, and is used to hold the log of
+changes. This is the one exception.
+
+SECTOR0_CURRENT is a sector picked from the FREE sectors, and is where reads and
+writes from the true sector zero are redirected to. Note that like any backup
+sector, if the sector is written to directly, it must be moved again.
+
+UNCHANGED means that the sector has not been changed since we entered state 1.
+Thus if it is written to or trimmed, the contents must first be backed up.
+
+FREE means that the sector was trimmed in state 0 and has not yet been written
+to or used for backup. On being written to, a FREE sector is changed to CHANGED.
+
+CHANGED means that the sector has been modified, and can be further modified
+without further backup.
+
+BACKUP means that this is a free sector being used as a backup. On being written
+to, the contents must first be backed up again.
+
+All backup operations are logged to the first sector. The log sector has the
+format:
+--------------------------------------------------------
+| Magic | Count | Sequence | Log entry | Log entry | …
+--------------------------------------------------------
+
+Magic is a magic number. Count is the number of log entries. Sequence is 0
+initially. A log entry is:
+
+-----------------------------------
+| Source | Dest | Size | Checksum |
+-----------------------------------
+
+When SECTOR0 is full, the log sector is backed up and another empty log sector
+created with a sequence number one higher. The first entry in any log
+sector with sequence > 0 therefore must be the log of the backing up of
+the previous log sector. Note that sequence is not strictly needed, but
+is a useful sanity check
+and potentially limits the time spent trying to restore a corrupted snapshot.
+
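+As an illustration only, the layout described above could be expressed
+as C structures along these lines (the field widths and names are
+assumptions made for this sketch; the document does not specify them):
+
+struct log_entry {
+	__le64 source;		/* sector the data was copied from */
+	__le64 dest;		/* free sector the data was copied to */
+	__le32 size;		/* size of the backed-up range */
+	__le32 checksum;	/* integrity check of this entry */
+};
+
+struct log_sector {
+	__le32 magic;		/* identifies a dm_bow log sector */
+	__le32 count;		/* number of log entries that follow */
+	__le32 sequence;	/* 0 for the first log sector */
+	struct log_entry entries[];
+};
+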
+On entering state 1, dm_bow has a list of free sectors. All other sectors are
+unchanged. SECTOR0_CURRENT is selected from the free sectors and the contents
+of sector 0 are copied there. Sector 0 is then backed up, which triggers the
+first log entry to be written.
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 72821b6..4f8e4f5 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -146,6 +146,9 @@
 - SDA429
   compatible = "qcom,sda429"
 
+- SDM429W
+  compatible = "qcom,sdm429w"
+
 - QM215
   compatible = "qcom, qm215"
 
@@ -366,6 +369,7 @@
 compatible = "qcom,sdm429-cdp"
 compatible = "qcom,sdm429-mtp"
 compatible = "qcom,sdm429-qrd"
+compatible = "qcom,sdm429w-qrd"
 compatible = "qcom,sda429-cdp"
 compatible = "qcom,sda429-mtp"
 compatible = "qcom,sdm439-cdp"
@@ -407,5 +411,5 @@
 compatible = "qcom,sdxpoorwills-mtp"
 compatible = "qcom,sdxpoorwills-cdp"
 compatible = "qcom,sdxpoorwills-ttp"
-compatible = "qcom,sdxpoorwills-adp"
+compatible = "qcom,sdxpoorwills-ccard"
 compatible = "qcom,mdm9607-ttp"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_dev_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_dev_xprt.txt
new file mode 100644
index 0000000..2077fe0
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_dev_xprt.txt
@@ -0,0 +1,21 @@
+Qualcomm Technologies, Inc. IPC Router MHI_DEV Transport
+
+Required properties:
+-compatible:		should be "qcom,ipc-router-mhi-dev-xprt".
+-qcom,out-chan-id:	MHI Channel ID for the transmit path.
+-qcom,in-chan-id:	MHI Channel ID for the receive path.
+-qcom,xprt-remote:	string that defines the edge of the transport.
+-qcom,xprt-linkid:	unique integer to identify the tier to which the link
+			belongs in the network; used to avoid routing loops
+			while forwarding broadcast messages.
+-qcom,xprt-version:	unique version ID used by MHI transport header.
+
+Example:
+	qcom,ipc_router_external_ap_xprt {
+		compatible = "qcom,ipc-router-mhi-dev-xprt";
+		qcom,out-chan-id = <34>;
+		qcom,in-chan-id = <35>;
+		qcom,xprt-remote = "external-ap";
+		qcom,xprt-linkid = <2>;
+		qcom,xprt-version = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/cyttsp5.txt b/Documentation/devicetree/bindings/input/touchscreen/cyttsp5.txt
new file mode 100644
index 0000000..28a0a2c
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/cyttsp5.txt
@@ -0,0 +1,88 @@
+* Cypress cyttsp5 touchscreen controller
+
+Required properties:
+ - compatible		: must be "cy,cyttsp5_i2c_adapter"
+ - reg			: Device I2C address or SPI chip select number
+ - interrupt-parent	: the phandle for the gpio controller
+			  (see interrupt binding[0]).
+ - interrupts		: (gpio) interrupt to which the chip is connected
+			  (see interrupt binding[0]).
+ - vcc_i2c-supply	: power supply
+ - vdd-supply		: power supply
+ - cy,mt		: multi-touch
+
+Optional properties:
+ - cy,btn		: button
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/gpio/gpio.txt
+
+Example:
+	&i2c1 {
+		/* ... */
+		tsc@24 {
+			compatible = "cy,cyttsp5_i2c_adapter";
+			reg = <0x24>;
+			interrupt-parent = <&msm_gpio>;
+			interrupts = <13 0x2008>;
+			cy,adapter_id = "cyttsp5_i2c_adapter";
+			vcc_i2c-supply = <&pm8916_l6>;
+			vdd-supply = <&pm8916_l17>;
+
+			cy,core {
+				cy,name = "cyttsp5_core";
+				cy,irq_gpio = <13>;
+				cy,rst_gpio = <12>;
+				cy,hid_desc_register = <1>;
+				cy,flags = <4>;
+				cy,easy_wakeup_gesture = <1>;
+				cy,btn_keys = <172 139 158 217 114 115 212 116>;
+				cy,btn_keys-tag = <0>;
+
+				cy,mt {
+					cy,name = "cyttsp5_mt";
+					cy,inp_dev_name = "cyttsp5_mt";
+					cy,flags = <0x28>;
+					cy,abs =
+						<0x35 0 320 0 0
+						0x36 0 360 0 0
+						0x3a 0 255 0 0
+						0xffff 0 255 0 0
+						0x39 0 15 0 0
+						0x30 0 255 0 0
+						0x31 0 255 0 0
+						0x34 0xffffff81 127 0 0
+						0x37 0 1 0 0
+						0x3b 0 255 0 0>;
+
+					cy,vkeys_x = <320>;
+					cy,vkeys_y = <360>;
+
+					cy,virtual_keys =
+						/* KEY_BACK */
+						<158 1360 90 160 180
+						/* KEY_MENU */
+						139 1360 270 160 180
+						/* KEY_HOMEPAGE */
+						172 1360 450 160 180
+						/* KEY SEARCH */
+						217 1360 630 160 180>;
+				};
+
+				cy,btn {
+					cy,name = "cyttsp5_btn";
+
+					cy,inp_dev_name = "cyttsp5_btn";
+				};
+
+				cy,proximity {
+					cy,name = "cyttsp5_proximity";
+
+					cy,inp_dev_name = "cyttsp5_proximity";
+					cy,abs = <0x19 0 1 0 0>;
+				};
+			};
+		};
+
+		/* ... */
+	};
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 6c61ada..dba61a5 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -91,11 +91,9 @@
 	  contents will not be retained. It is software responsibility to restore the
 	  SDCC registers before resuming to normal operation.
 	- qcom,force-sdhc1-probe: Force probing sdhc1 even if it is not the boot device.
-	- qcom,ddr-config: Certain chipsets and platforms require particular settings for
-			   the RCLK delay DLL configuration register for HS400 mode to work.
-			   This value can vary between platforms and msms. If a msm/platform
-			   require a different DLL setting than the default/POR setting for
-			   HS400 mode, it can be specified using this field.
+	- qcom,dll-hsr-list: List of DLL-HSR values which are tuned for a given
+			 process node and platform. The sequence of values in this list
+			 must follow the sequence of members in the sdhci_msm_dll_hsr
+			 data structure.
 In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
 	- qcom,<supply>-always-on - specifies whether supply should be kept "on" always.
 	- qcom,<supply>-lpm_sup - specifies whether supply can be kept in low power mode (lpm).
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 0615f87..a37bb61 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -1600,7 +1600,8 @@
 * MSM8952 ASoC Machine driver
 
 Required properties:
-- compatible : "qcom,msm8952-audio-codec"
+- compatible : "qcom,msm8952-audio-codec" for pmic codec,
+				"qcom,msm8952-dig-asoc-snd" for digital internal codec,
 - qcom,model : The user-visible name of this sound card.
 - reg : Offset and length of the register region(s) for MI2S/PCM MUX
 - reg-names : Register region name(s) referenced in reg above
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
index 47a213c..de028b6 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
@@ -17,7 +17,8 @@
 
 Optional properties:
 
-- gpio-reset - gpio pin number used for codec reset
+- gpio-reset - gpio pin number used for codec reset, active low by default
+- reset-inverted - treat the reset gpio as active high
 - ai3x-gpio-func - <array of 2 int> - AIC3X_GPIO1 & AIC3X_GPIO2 Functionality
 				    - Not supported on tlv320aic3104
 - ai3x-micbias-vg - MicBias Voltage required.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index fa8b60e..c9c85bf 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -320,3 +320,4 @@
 zte	ZTE Corp.
 zyxel	ZyXEL Communications Corp.
 inven   InvenSense, Inc.
+cy	Parade Technologies, Ltd.
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index b640a37..564ccc6 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -125,6 +125,9 @@
 disable_ext_identify   Disable the extension list configured by mkfs, so f2fs
                        does not aware of cold files such as media files.
 inline_xattr           Enable the inline xattrs feature.
+noinline_xattr         Disable the inline xattrs feature.
+inline_xattr_size=%u   Support configuring inline xattr size; it depends on
+		       the flexible inline xattr feature.
 inline_data            Enable the inline data feature: New created small(<~3.4k)
                        files can be written into inode block.
 inline_dentry          Enable the inline dir feature: data in new created
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 3a7b605..08c23b6 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -343,9 +343,9 @@
 - ``ENOTEMPTY``: the file is unencrypted and is a nonempty directory
 - ``ENOTTY``: this type of filesystem does not implement encryption
 - ``EOPNOTSUPP``: the kernel was not configured with encryption
-  support for this filesystem, or the filesystem superblock has not
+  support for filesystems, or the filesystem superblock has not
   had encryption enabled on it.  (For example, to use encryption on an
-  ext4 filesystem, CONFIG_EXT4_ENCRYPTION must be enabled in the
+  ext4 filesystem, CONFIG_FS_ENCRYPTION must be enabled in the
   kernel config, and the superblock must have had the "encrypt"
   feature flag enabled using ``tune2fs -O encrypt`` or ``mkfs.ext4 -O
   encrypt``.)
@@ -451,10 +451,18 @@
 - Unencrypted files, or files encrypted with a different encryption
   policy (i.e. different key, modes, or flags), cannot be renamed or
   linked into an encrypted directory; see `Encryption policy
-  enforcement`_.  Attempts to do so will fail with EPERM.  However,
+  enforcement`_.  Attempts to do so will fail with EXDEV.  However,
   encrypted files can be renamed within an encrypted directory, or
   into an unencrypted directory.
 
+  Note: "moving" an unencrypted file into an encrypted directory, e.g.
+  with the `mv` program, is implemented in userspace by a copy
+  followed by a delete.  Be aware that the original unencrypted data
+  may remain recoverable from free space on the disk; prefer to keep
+  all files encrypted from the very beginning.  The `shred` program
+  may be used to overwrite the source files but isn't guaranteed to be
+  effective on all filesystems and storage devices.
+
 - Direct I/O is not supported on encrypted files.  Attempts to use
   direct I/O on such files will fall back to buffered I/O.
 
@@ -541,7 +549,7 @@
 Except for those special files, it is forbidden to have unencrypted
 files, or files encrypted with a different encryption policy, in an
 encrypted directory tree.  Attempts to link or rename such a file into
-an encrypted directory will fail with EPERM.  This is also enforced
+an encrypted directory will fail with EXDEV.  This is also enforced
 during ->lookup() to provide limited protection against offline
 attacks that try to disable or downgrade encryption in known locations
 where applications may later write sensitive data.  It is recommended
diff --git a/Documentation/hw-vuln/index.rst b/Documentation/hw-vuln/index.rst
new file mode 100644
index 0000000..ffc064c
--- /dev/null
+++ b/Documentation/hw-vuln/index.rst
@@ -0,0 +1,13 @@
+========================
+Hardware vulnerabilities
+========================
+
+This section describes CPU vulnerabilities and provides an overview of the
+possible mitigations along with guidance for selecting mitigations if they
+are configurable at compile, boot or run time.
+
+.. toctree::
+   :maxdepth: 1
+
+   l1tf
+   mds
diff --git a/Documentation/l1tf.rst b/Documentation/hw-vuln/l1tf.rst
similarity index 98%
rename from Documentation/l1tf.rst
rename to Documentation/hw-vuln/l1tf.rst
index bae52b84..31653a9 100644
--- a/Documentation/l1tf.rst
+++ b/Documentation/hw-vuln/l1tf.rst
@@ -405,6 +405,9 @@
 
   off		Disables hypervisor mitigations and doesn't emit any
 		warnings.
+		It also drops the swap size and available RAM limit restrictions
+		on both hypervisor and bare metal.
+
   ============  =============================================================
 
 The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
@@ -442,6 +445,7 @@
 line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
 module parameter is ignored and writes to the sysfs file are rejected.
 
+.. _mitigation_selection:
 
 Mitigation selection guide
 --------------------------
@@ -553,7 +557,7 @@
 the bare metal hypervisor, the nested hypervisor and the nested virtual
 machine.  VMENTER operations from the nested hypervisor into the nested
 guest will always be processed by the bare metal hypervisor. If KVM is the
-bare metal hypervisor it wiil:
+bare metal hypervisor it will:
 
  - Flush the L1D cache on every switch from the nested hypervisor to the
    nested virtual machine, so that the nested hypervisor's secrets are not
@@ -576,7 +580,8 @@
   The kernel default mitigations for vulnerable processors are:
 
   - PTE inversion to protect against malicious user space. This is done
-    unconditionally and cannot be controlled.
+    unconditionally and cannot be controlled. The swap storage is limited
+    to ~16TB.
 
   - L1D conditional flushing on VMENTER when EPT is enabled for
     a guest.
diff --git a/Documentation/hw-vuln/mds.rst b/Documentation/hw-vuln/mds.rst
new file mode 100644
index 0000000..daf6fda
--- /dev/null
+++ b/Documentation/hw-vuln/mds.rst
@@ -0,0 +1,308 @@
+MDS - Microarchitectural Data Sampling
+======================================
+
+Microarchitectural Data Sampling is a hardware vulnerability which allows
+unprivileged speculative access to data which is available in various CPU
+internal buffers.
+
+Affected processors
+-------------------
+
+This vulnerability affects a wide range of Intel processors. The
+vulnerability is not present on:
+
+   - Processors from AMD, Centaur and other non-Intel vendors
+
+   - Older processor models, where the CPU family is < 6
+
+   - Some Atoms (Bonnell, Saltwell, Goldmont, GoldmontPlus)
+
+   - Intel processors which have the ARCH_CAP_MDS_NO bit set in the
+     IA32_ARCH_CAPABILITIES MSR.
+
+Whether a processor is affected or not can be read out from the MDS
+vulnerability file in sysfs. See :ref:`mds_sys_info`.
+
+Not all processors are affected by all variants of MDS, but the mitigation
+is identical for all of them so the kernel treats them as a single
+vulnerability.
+
+Related CVEs
+------------
+
+The following CVE entries are related to the MDS vulnerability:
+
+   ==============  =====  ===================================================
+   CVE-2018-12126  MSBDS  Microarchitectural Store Buffer Data Sampling
+   CVE-2018-12130  MFBDS  Microarchitectural Fill Buffer Data Sampling
+   CVE-2018-12127  MLPDS  Microarchitectural Load Port Data Sampling
+   CVE-2019-11091  MDSUM  Microarchitectural Data Sampling Uncacheable Memory
+   ==============  =====  ===================================================
+
+Problem
+-------
+
+When performing store, load or L1 refill operations, processors write data
+into temporary microarchitectural structures (buffers). The data in the
+buffer can be forwarded to load operations as an optimization.
+
+Under certain conditions, usually a fault/assist caused by a load
+operation, data unrelated to the load memory address can be speculatively
+forwarded from the buffers. Because the load operation causes a fault or
+assist and its result will be discarded, the forwarded data will not cause
+incorrect program execution or state changes. But a malicious operation
+may be able to forward this speculative data to a disclosure gadget,
+which in turn allows the value to be inferred via a cache side channel
+attack.
+
+Because the buffers are potentially shared between Hyper-Threads,
+cross-Hyper-Thread attacks are possible.
+
+Deeper technical information is available in the MDS specific x86
+architecture section: :ref:`Documentation/x86/mds.rst <mds>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the MDS vulnerabilities can be mounted from malicious
+non-privileged user space applications running on hosts or guests.
+Malicious guest OSes can obviously mount attacks as well.
+
+Contrary to other speculation based vulnerabilities, the MDS vulnerability
+does not allow the attacker to control the memory target address. As a
+consequence the attacks are purely sampling based, but as demonstrated with
+the TLBleed attack, samples can be postprocessed successfully.
+
+Web-Browsers
+^^^^^^^^^^^^
+
+  It's unclear whether attacks through Web-Browsers are possible at
+  all. The exploitation through JavaScript is considered very unlikely,
+  but other widely used web technologies like WebAssembly could possibly
+  be abused.
+
+
+.. _mds_sys_info:
+
+MDS system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current MDS
+status of the system: whether the system is vulnerable, and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/mds
+
+The possible values in this file are:
+
+  .. list-table::
+
+     * - 'Not affected'
+       - The processor is not vulnerable
+     * - 'Vulnerable'
+       - The processor is vulnerable, but no mitigation is enabled
+     * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+       - The processor is vulnerable, but the microcode is not updated.
+
+         The mitigation is enabled on a best effort basis. See :ref:`vmwerv`
+     * - 'Mitigation: Clear CPU buffers'
+       - The processor is vulnerable and the CPU buffer clearing mitigation is
+         enabled.
+
+If the processor is vulnerable then the following information is appended
+to the above information:
+
+    ========================  ============================================
+    'SMT vulnerable'          SMT is enabled
+    'SMT mitigated'           SMT is enabled and mitigated
+    'SMT disabled'            SMT is disabled
+    'SMT Host state unknown'  Kernel runs in a VM, Host SMT state unknown
+    ========================  ============================================
+
+.. _vmwerv:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+  If the processor is vulnerable, but the availability of the microcode based
+  mitigation mechanism is not advertised via CPUID, the kernel selects a best
+  effort mitigation mode.  This mode invokes the mitigation instructions
+  without a guarantee that they clear the CPU buffers.
+
+  This is done to address virtualization scenarios where the host has the
+  microcode update applied, but the hypervisor is not yet updated to expose
+  the CPUID to the guest. If the host has updated microcode, the protection
+  takes effect; otherwise a few CPU cycles are wasted pointlessly.
+
+  The state in the mds sysfs file reflects this situation accordingly.
+
+
+Mitigation mechanism
+-------------------------
+
+The kernel detects the affected CPUs and the presence of the microcode
+which is required.
+
+If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default. The mitigation can be controlled at boot
+time via a kernel command line option. See
+:ref:`mds_mitigation_control_command_line`.
+
+.. _cpu_buffer_clear:
+
+CPU buffer clearing
+^^^^^^^^^^^^^^^^^^^
+
+  The mitigation for MDS clears the affected CPU buffers on return to user
+  space and when entering a guest.
+
+  If SMT is enabled it also clears the buffers on idle entry when the CPU
+  is only affected by MSBDS and not any other MDS variant, because the
+  other variants cannot be protected against cross Hyper-Thread attacks.
+
+  For CPUs which are only affected by MSBDS the user space, guest and idle
+  transition mitigations are sufficient and SMT is not affected.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+  The protection for host to guest transition depends on the L1TF
+  vulnerability of the CPU:
+
+  - CPU is affected by L1TF:
+
+    If the L1D flush mitigation is enabled and up to date microcode is
+    available, the L1D flush mitigation is automatically protecting the
+    guest transition.
+
+    If the L1D flush mitigation is disabled then the MDS mitigation is
+    invoked explicitly when the host MDS mitigation is enabled.
+
+    For details on L1TF and virtualization see:
+    :ref:`Documentation/hw-vuln/l1tf.rst <mitigation_control_kvm>`.
+
+  - CPU is not affected by L1TF:
+
+    CPU buffers are flushed before entering the guest when the host MDS
+    mitigation is enabled.
+
+  The resulting MDS protection matrix for the host to guest transition:
+
+  ============ ===== ============= ============ =================
+   L1TF         MDS   VMX-L1FLUSH   Host MDS     MDS-State
+
+   Don't care   No    Don't care    N/A          Not affected
+
+   Yes          Yes   Disabled      Off          Vulnerable
+
+   Yes          Yes   Disabled      Full         Mitigated
+
+   Yes          Yes   Enabled       Don't care   Mitigated
+
+   No           Yes   N/A           Off          Vulnerable
+
+   No           Yes   N/A           Full         Mitigated
+  ============ ===== ============= ============ =================
+
+  This only covers the host to guest transition, i.e. prevents leakage from
+  host to guest, but does not protect the guest internally. Guests need to
+  have their own protections.
+
+.. _xeon_phi:
+
+XEON PHI specific considerations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+  The XEON PHI processor family is affected by MSBDS which can be exploited
+  cross Hyper-Threads when entering idle states. Some XEON PHI variants allow
+  the use of MWAIT in user space (Ring 3), which opens a potential attack
+  vector for malicious user space. The exposure can be disabled on the kernel
+  command line with the 'ring3mwait=disable' command line option.
+
+  XEON PHI is not affected by the other MDS variants and MSBDS is mitigated
+  before the CPU enters an idle state. As XEON PHI is not affected by L1TF
+  either, disabling SMT is not required for full protection.
+
+.. _mds_smt_control:
+
+SMT control
+^^^^^^^^^^^
+
+  All MDS variants except MSBDS can be attacked cross Hyper-Threads. That
+  means on CPUs which are affected by MFBDS or MLPDS it is necessary to
+  disable SMT for full protection. These are most of the affected CPUs; the
+  exception is XEON PHI, see :ref:`xeon_phi`.
+
+  Disabling SMT can have a significant performance impact, but the impact
+  depends on the type of workloads.
+
+  See the relevant chapter in the L1TF mitigation documentation for details:
+  :ref:`Documentation/hw-vuln/l1tf.rst <smt_control>`.
+
+
+.. _mds_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows control of the MDS mitigations at boot
+time with the option "mds=". The valid arguments for this option are:
+
+  ============  =============================================================
+  full		If the CPU is vulnerable, enable all available mitigations
+		for the MDS vulnerability: CPU buffer clearing on exit to
+		userspace and when entering a VM. Idle transitions are
+		protected as well if SMT is enabled.
+
+		It does not automatically disable SMT.
+
+  full,nosmt	The same as mds=full, with SMT disabled on vulnerable
+		CPUs.  This is the complete mitigation.
+
+  off		Disables MDS mitigations completely.
+
+  ============  =============================================================
+
+Not specifying this option is equivalent to "mds=full".
+
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace
+^^^^^^^^^^^^^^^^^^^^
+
+   If all userspace applications are from a trusted source and do not
+   execute untrusted code which is supplied externally, then the mitigation
+   can be disabled.
+
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   The same considerations as above versus trusted user space apply.
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   The protection depends on the state of the L1TF mitigations.
+   See :ref:`virt_mechanism`.
+
+   If the MDS mitigation is enabled and SMT is disabled, guest to host and
+   guest to guest attacks are prevented.
+
+.. _mds_default_mitigations:
+
+Default mitigations
+-------------------
+
+  The kernel default mitigations for vulnerable processors are:
+
+  - Enable CPU buffer clearing
+
+  The kernel does not by default enforce the disabling of SMT, which leaves
+  SMT systems vulnerable when running untrusted code. The same rationale as
+  for L1TF applies.
+  See :ref:`Documentation/hw-vuln/l1tf.rst <default_mitigations>`.
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 213399a..f95c58d 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -12,7 +12,6 @@
    :maxdepth: 2
 
    kernel-documentation
-   l1tf
    development-process/index
    dev-tools/tools
    driver-api/index
@@ -20,6 +19,24 @@
    gpu/index
    80211/index
 
+This section describes CPU vulnerabilities and their mitigations.
+
+.. toctree::
+   :maxdepth: 1
+
+   hw-vuln/index
+
+Architecture-specific documentation
+-----------------------------------
+
+These books provide programming details about architecture-specific
+implementation.
+
+.. toctree::
+   :maxdepth: 2
+
+   x86/index
+
 Indices and tables
 ==================
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 719c4cd..ccbeedb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2098,10 +2098,13 @@
 			off
 				Disables hypervisor mitigations and doesn't
 				emit any warnings.
+				It also drops the swap size and available
+				RAM limit restrictions on both hypervisor and
+				bare metal.
 
 			Default is 'flush'.
 
-			For details see: Documentation/admin-guide/l1tf.rst
+			For details see: Documentation/hw-vuln/l1tf.rst
 
 	l2cr=		[PPC]
 
@@ -2344,6 +2347,32 @@
 			Format: <first>,<last>
 			Specifies range of consoles to be captured by the MDA.
 
+	mds=		[X86,INTEL]
+			Control mitigation for the Micro-architectural Data
+			Sampling (MDS) vulnerability.
+
+			Certain CPUs are vulnerable to an exploit against CPU
+			internal buffers which can forward information to a
+			disclosure gadget under certain conditions.
+
+			In vulnerable processors, the speculatively
+			forwarded data can be used in a cache side channel
+			attack, to access data to which the attacker does
+			not have direct access.
+
+			This parameter controls the MDS mitigation. The
+			options are:
+
+			full       - Enable MDS mitigation on vulnerable CPUs
+			full,nosmt - Enable MDS mitigation and disable
+				     SMT on vulnerable CPUs
+			off        - Unconditionally disable MDS mitigation
+
+			Not specifying this option is equivalent to
+			mds=full.
+
+			For details see: Documentation/hw-vuln/mds.rst
+
 	mem=nn[KMG]	[KNL,BOOT] Force usage of a specific amount of memory
 			Amount of memory to be used when the kernel is not able
 			to see the whole system memory or for test.
@@ -2466,6 +2495,38 @@
 			in the "bleeding edge" mini2440 support kernel at
 			http://repo.or.cz/w/linux-2.6/mini2440.git
 
+	mitigations=
+			[X86] Control optional mitigations for CPU
+			vulnerabilities.  This is a set of curated,
+			arch-independent options, each of which is an
+			aggregation of existing arch-specific options.
+
+			off
+				Disable all optional CPU mitigations.  This
+				improves system performance, but it may also
+				expose users to several CPU vulnerabilities.
+				Equivalent to: nopti [X86]
+					       nospectre_v2 [X86]
+					       spectre_v2_user=off [X86]
+					       spec_store_bypass_disable=off [X86]
+					       l1tf=off [X86]
+					       mds=off [X86]
+
+			auto (default)
+				Mitigate all CPU vulnerabilities, but leave SMT
+				enabled, even if it's vulnerable.  This is for
+				users who don't want to be surprised by SMT
+				getting disabled across kernel upgrades, or who
+				have other ways of avoiding SMT-based attacks.
+				Equivalent to: (default behavior)
+
+			auto,nosmt
+				Mitigate all CPU vulnerabilities, disabling SMT
+				if needed.  This is for users who always want to
+				be fully mitigated, even if it means losing SMT.
+				Equivalent to: l1tf=flush,nosmt [X86]
+					       mds=full,nosmt [X86]
+
 	mminit_loglevel=
 			[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
 			parameter allows control of the logging verbosity for
@@ -2780,6 +2841,10 @@
 
 	nohugeiomap	[KNL,x86] Disable kernel huge I/O mappings.
 
+	nospectre_v1	[PPC] Disable mitigations for Spectre Variant 1 (bounds
+			check bypass). With this option data leaks are possible
+			in the system.
+
 	nosmt		[KNL,S390] Disable symmetric multithreading (SMT).
 			Equivalent to smt=1.
 
@@ -2787,7 +2852,7 @@
 			nosmt=force: Force disable SMT, cannot be undone
 				     via the sysfs control file.
 
-	nospectre_v2	[X86] Disable all mitigations for the Spectre variant 2
+	nospectre_v2	[X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
 			(indirect branch prediction) vulnerability. System may
 			allow data leaks with this option, which is equivalent
 			to spectre_v2=off.
@@ -3409,6 +3474,10 @@
 			before loading.
 			See Documentation/blockdev/ramdisk.txt.
 
+	psi=		[KNL] Enable or disable pressure stall information
+			tracking.
+			Format: <bool>
+
 	psmouse.proto=	[HW,MOUSE] Highest PS2 mouse protocol extension to
 			probe for; one of (bare|imps|exps|lifebook|any).
 	psmouse.rate=	[HW,MOUSE] Set desired mouse report rate, in reports
@@ -4056,9 +4125,13 @@
 
 	spectre_v2=	[X86] Control mitigation of Spectre variant 2
 			(indirect branch speculation) vulnerability.
+			The default operation protects the kernel from
+			user space attacks.
 
-			on   - unconditionally enable
-			off  - unconditionally disable
+			on   - unconditionally enable, implies
+			       spectre_v2_user=on
+			off  - unconditionally disable, implies
+			       spectre_v2_user=off
 			auto - kernel detects whether your CPU model is
 			       vulnerable
 
@@ -4068,6 +4141,12 @@
 			CONFIG_RETPOLINE configuration option, and the
 			compiler with which the kernel was built.
 
+			Selecting 'on' will also enable the mitigation
+			against user space to user space task attacks.
+
+			Selecting 'off' will disable both the kernel and
+			the user space protections.
+
 			Specific mitigations can also be selected manually:
 
 			retpoline	  - replace indirect branches
@@ -4077,6 +4156,48 @@
 			Not specifying this option is equivalent to
 			spectre_v2=auto.
 
+	spectre_v2_user=
+			[X86] Control mitigation of Spectre variant 2
+			(indirect branch speculation) vulnerability between
+			user space tasks.
+
+			on	- Unconditionally enable mitigations. Is
+				  enforced by spectre_v2=on
+
+			off     - Unconditionally disable mitigations. Is
+				  enforced by spectre_v2=off
+
+			prctl   - Indirect branch speculation is enabled,
+				  but mitigation can be enabled via prctl
+				  per thread.  The mitigation control state
+				  is inherited on fork.
+
+			prctl,ibpb
+				- Like "prctl" above, but only STIBP is
+				  controlled per thread. IBPB is issued
+				  always when switching between different user
+				  space processes.
+
+			seccomp
+				- Same as "prctl" above, but all seccomp
+				  threads will enable the mitigation unless
+				  they explicitly opt out.
+
+			seccomp,ibpb
+				- Like "seccomp" above, but only STIBP is
+				  controlled per thread. IBPB is issued
+				  always when switching between different
+				  user space processes.
+
+			auto    - Kernel selects the mitigation depending on
+				  the available CPU features and vulnerability.
+
+			Default mitigation:
+			If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
+
+			Not specifying this option is equivalent to
+			spectre_v2_user=auto.
+
 	spec_store_bypass_disable=
 			[HW] Control Speculative Store Bypass (SSB) Disable mitigation
 			(Speculative Store Bypass vulnerability)
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 827622e..c88a0a8 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -405,6 +405,7 @@
 	minimum RTT when it is moved to a longer path (e.g., due to traffic
 	engineering). A longer window makes the filter more resistant to RTT
 	inflations such as transient congestion. The unit is seconds.
+	Possible values: 0 - 86400 (1 day)
 	Default: 300
 
 tcp_moderate_rcvbuf - BOOLEAN
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index d2fbeeb..54504c5 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -31,6 +31,15 @@
 Raw pointer value SHOULD be printed with %p. The kernel supports
 the following extended format specifiers for pointer types:
 
+Pointer Types:
+
+Pointers printed without a specifier extension (i.e. unadorned %p) are
+hashed to give a unique identifier without leaking kernel addresses to user
+space. On 64-bit machines the first 32 bits are zeroed. If you _really_
+want the address see %px below.
+
+	%p	abcdef12 or 00000000abcdef12
+
 Symbols/Function Pointers:
 
 	%pF	versatile_init+0x0/0x110
@@ -58,12 +67,24 @@
 
 Kernel Pointers:
 
-	%pK	0x01234567 or 0x0123456789abcdef
+	%pK	01234567 or 0123456789abcdef
 
 	For printing kernel pointers which should be hidden from unprivileged
 	users. The behaviour of %pK depends on the kptr_restrict sysctl - see
 	Documentation/sysctl/kernel.txt for more details.
 
+Unmodified Addresses:
+
+	%px	01234567 or 0123456789abcdef
+
+	For printing pointers when you _really_ want to print the address. Please
+	consider whether or not you are leaking sensitive information about the
+	Kernel layout in memory before printing pointers with %px. %px is
+	functionally equivalent to %lx. %px is preferred to %lx because it is more
+	uniquely grep'able. If, in the future, we need to modify the way the Kernel
+	handles printing pointers it will be nice to be able to find the call
+	sites.
+
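+	Example (an illustrative sketch; "ptr" stands for any kernel
+	pointer):
+
+		pr_info("hashed %p, raw %px\n", ptr, ptr);
+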
 Struct Resources:
 
 	%pr	[mem 0x60000000-0x6fffffff flags 0x2200] or
diff --git a/Documentation/siphash.txt b/Documentation/siphash.txt
new file mode 100644
index 0000000..908d348
--- /dev/null
+++ b/Documentation/siphash.txt
@@ -0,0 +1,175 @@
+         SipHash - a short input PRF
+-----------------------------------------------
+Written by Jason A. Donenfeld <jason@zx2c4.com>
+
+SipHash is a cryptographically secure PRF -- a keyed hash function -- that
+performs very well for short inputs, hence the name. It was designed by
+cryptographers Daniel J. Bernstein and Jean-Philippe Aumasson. It is intended
+as a replacement for some uses of: `jhash`, `md5_transform`, `sha_transform`,
+and so forth.
+
+SipHash takes a secret key filled with randomly generated numbers and either
+an input buffer or several input integers. It spits out an integer that is
+indistinguishable from random. You may then use that integer as part of secure
+sequence numbers, secure cookies, or mask it off for use in a hash table.
+
+1. Generating a key
+
+Keys should always be generated from a cryptographically secure source of
+random numbers, either using get_random_bytes or get_random_once:
+
+siphash_key_t key;
+get_random_bytes(&key, sizeof(key));
+
+If you're not deriving your key from here, you're doing it wrong.
+
+2. Using the functions
+
+There are two variants of the function, one that takes a list of integers, and
+one that takes a buffer:
+
+u64 siphash(const void *data, size_t len, const siphash_key_t *key);
+
+And:
+
+u64 siphash_1u64(u64, const siphash_key_t *key);
+u64 siphash_2u64(u64, u64, const siphash_key_t *key);
+u64 siphash_3u64(u64, u64, u64, const siphash_key_t *key);
+u64 siphash_4u64(u64, u64, u64, u64, const siphash_key_t *key);
+u64 siphash_1u32(u32, const siphash_key_t *key);
+u64 siphash_2u32(u32, u32, const siphash_key_t *key);
+u64 siphash_3u32(u32, u32, u32, const siphash_key_t *key);
+u64 siphash_4u32(u32, u32, u32, u32, const siphash_key_t *key);
+
+If you pass the generic siphash function something of a constant length, it
+will constant fold at compile-time and automatically choose one of the
+optimized functions.
+
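+For example, a minimal sketch of hashing three 64-bit values with one of
+the helpers (the values are invented for the illustration):
+
+siphash_key_t key;
+u64 h;
+
+get_random_bytes(&key, sizeof(key));
+h = siphash_3u64(1, 2, 3, &key);
+
+Per the note above, passing a constant-length buffer holding the same
+three values to the generic siphash() would constant fold down to the
+same helper.
+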
+3. Hashtable key function usage:
+
+struct some_hashtable {
+	DECLARE_HASHTABLE(hashtable, 8);
+	siphash_key_t key;
+};
+
+void init_hashtable(struct some_hashtable *table)
+{
+	get_random_bytes(&table->key, sizeof(table->key));
+}
+
+static inline struct hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+{
+	return &table->hashtable[siphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+You may then iterate like usual over the returned hash bucket.
+
+4. Security
+
+SipHash has a very high security margin, with its 128-bit key. So long as the
+key is kept secret, it is impossible for an attacker to guess the outputs of
+the function, even when able to observe many outputs, since 2^128 possible
+keys is an infeasible search space.
+
+Linux implements the "2-4" variant of SipHash.
+
+5. Struct-passing Pitfalls
+
+Oftentimes the XuY functions will not be large enough, and instead you'll
+want to pass a pre-filled struct to siphash. When doing this, it's important
+to always ensure the struct has no padding holes. The easiest way to do this
+is to simply arrange the members of the struct in descending order of size,
+and to use offsetofend() instead of sizeof() for getting the size. For
+performance reasons, if possible, it's probably a good thing to align the
+struct to the right boundary. Here's an example:
+
+const struct {
+	struct in6_addr saddr;
+	u32 counter;
+	u16 dport;
+} __aligned(SIPHASH_ALIGNMENT) combined = {
+	.saddr = *(struct in6_addr *)saddr,
+	.counter = counter,
+	.dport = dport
+};
+u64 h = siphash(&combined, offsetofend(typeof(combined), dport), &secret);
+
+6. Resources
+
+Read the SipHash paper if you're interested in learning more:
+https://131002.net/siphash/siphash.pdf
+
+
+~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
+
+HalfSipHash - SipHash's insecure younger cousin
+-----------------------------------------------
+Written by Jason A. Donenfeld <jason@zx2c4.com>
+
+On the off-chance that SipHash is not fast enough for your needs, you might be
+able to justify using HalfSipHash, a terrifying but potentially useful
+possibility. HalfSipHash cuts SipHash's rounds down from "2-4" to "1-3" and,
+even scarier, uses an easily brute-forceable 64-bit key (with a 32-bit output)
+instead of SipHash's 128-bit key. However, this may appeal to some
+high-performance `jhash` users.
+
+Danger!
+
+Do not ever use HalfSipHash except as a hashtable key function, and only
+then when you can be absolutely certain that the outputs will never be
+transmitted out of the kernel. This is only remotely useful over `jhash` as a
+means of mitigating hashtable flooding denial of service attacks.
+
+1. Generating a key
+
+Keys should always be generated from a cryptographically secure source of
+random numbers, either using get_random_bytes or get_random_once:
+
+hsiphash_key_t key;
+get_random_bytes(&key, sizeof(key));
+
+If you're not deriving your key from here, you're doing it wrong.
+
+2. Using the functions
+
+There are two variants of the function, one that takes a list of integers, and
+one that takes a buffer:
+
+u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key);
+
+And:
+
+u32 hsiphash_1u32(u32, const hsiphash_key_t *key);
+u32 hsiphash_2u32(u32, u32, const hsiphash_key_t *key);
+u32 hsiphash_3u32(u32, u32, u32, const hsiphash_key_t *key);
+u32 hsiphash_4u32(u32, u32, u32, u32, const hsiphash_key_t *key);
+
+If you pass the generic hsiphash function something of a constant length, it
+will constant fold at compile-time and automatically choose one of the
+optimized functions.
+
+3. Hashtable key function usage:
+
+struct some_hashtable {
+	DECLARE_HASHTABLE(hashtable, 8);
+	hsiphash_key_t key;
+};
+
+void init_hashtable(struct some_hashtable *table)
+{
+	get_random_bytes(&table->key, sizeof(table->key));
+}
+
+static inline struct hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+{
+	return &table->hashtable[hsiphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+You may then iterate like usual over the returned hash bucket.
+
+4. Performance
+
+HalfSipHash is roughly 3 times slower than JenkinsHash. For many replacements,
+this will not be a problem, as the hashtable lookup isn't the bottleneck. And
+in general, this is probably a good sacrifice to make for the security and DoS
+resistance of HalfSipHash.
diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt
index 32f3d55..c4dbe6f 100644
--- a/Documentation/spec_ctrl.txt
+++ b/Documentation/spec_ctrl.txt
@@ -92,3 +92,12 @@
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
+
+- PR_SPEC_INDIRECT_BRANCH: Indirect Branch Speculation in User Processes
+                        (Mitigate Spectre V2 style attacks against user processes)
+
+  Invocations:
+   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
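+
+  As a minimal illustration (hypothetical example program, not part of the
+  kernel), a task that wants to force-disable indirect branch speculation
+  for itself and verify the result might do:
+
+    #include <stdio.h>
+    #include <sys/prctl.h>
+    #include <linux/prctl.h>
+
+    int main(void)
+    {
+            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
+                      PR_SPEC_FORCE_DISABLE, 0, 0))
+                    perror("PR_SET_SPECULATION_CTRL");
+
+            /* Expect a bitmask such as PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE. */
+            printf("state: 0x%x\n",
+                   prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
+                         0, 0, 0));
+            return 0;
+    }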
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 0a94ffe..b13e031 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -365,11 +365,15 @@
 then the interface is considered to be idle, and the kernel may
 autosuspend the device.
 
-Drivers need not be concerned about balancing changes to the usage
-counter; the USB core will undo any remaining "get"s when a driver
-is unbound from its interface.  As a corollary, drivers must not call
-any of the usb_autopm_* functions after their disconnect() routine has
-returned.
+Drivers must be careful to balance their overall changes to the usage
+counter.  Unbalanced "get"s will remain in effect when a driver is
+unbound from its interface, preventing the device from going into
+runtime suspend should the interface be bound to a driver again.  On
+the other hand, drivers are allowed to achieve this balance by calling
+the ``usb_autopm_*`` functions even after their ``disconnect`` routine
+has returned -- say from within a work-queue routine -- provided they
+retain an active reference to the interface (via ``usb_get_intf`` and
+``usb_put_intf``).
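+
+A minimal sketch of this pattern (hypothetical driver; the work item is
+assumed to have been initialized with ``INIT_WORK`` at probe time):
+
+	struct my_priv {
+		struct usb_interface *intf;
+		struct work_struct put_work;
+	};
+
+	static void my_put_work(struct work_struct *work)
+	{
+		struct my_priv *priv = container_of(work, struct my_priv, put_work);
+
+		/* Balance the "get" taken while the device was in use,
+		 * then drop our reference to the interface. */
+		usb_autopm_put_interface(priv->intf);
+		usb_put_intf(priv->intf);
+		kfree(priv);
+	}
+
+	static void my_disconnect(struct usb_interface *intf)
+	{
+		struct my_priv *priv = usb_get_intfdata(intf);
+
+		priv->intf = usb_get_intf(intf);	/* keep the interface alive */
+		schedule_work(&priv->put_work);
+	}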
 
 Drivers using the async routines are responsible for their own
 synchronization and mutual exclusion.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 3ff58a8..d1908e5 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -13,7 +13,7 @@
 
  - VM ioctls: These query and set attributes that affect an entire virtual
    machine, for example memory layout.  In addition a VM ioctl is used to
-   create virtual cpus (vcpus).
+   create virtual cpus (vcpus) and devices.
 
    Only run VM ioctls from the same process (address space) that was used
    to create the VM.
@@ -24,6 +24,11 @@
    Only run vcpu ioctls from the same thread that was used to create the
    vcpu.
 
+ - device ioctls: These query and set attributes that control the operation
+   of a single device.
+
+   device ioctls must be issued from the same process (address space) that
+   was used to create the VM.
 
 2. File descriptors
 -------------------
@@ -32,10 +37,11 @@
 open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
 can be used to issue system ioctls.  A KVM_CREATE_VM ioctl on this
 handle will create a VM file descriptor which can be used to issue VM
-ioctls.  A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
-and return a file descriptor pointing to it.  Finally, ioctls on a vcpu
-fd can be used to control the vcpu, including the important task of
-actually running guest code.
+ioctls.  A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
+create a virtual cpu or device and return a file descriptor pointing to
+the new resource.  Finally, ioctls on a vcpu or device fd can be used
+to control the vcpu or device.  For vcpus, this includes the important
+task of actually running guest code.
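+
+For illustration, the fd hierarchy can be exercised like this (a sketch with
+error checking omitted; a real VM also needs memory regions and register
+state set up before KVM_RUN):
+
+    int kvm  = open("/dev/kvm", O_RDWR);
+    int vm   = ioctl(kvm, KVM_CREATE_VM, 0);    /* system ioctl -> VM fd   */
+    int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);   /* VM ioctl     -> vcpu fd */
+    ioctl(vcpu, KVM_RUN, 0);                    /* vcpu ioctl: run guest   */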
 
 In general file descriptors can be migrated among processes by means
 of fork() and the SCM_RIGHTS facility of unix domain socket.  These
diff --git a/Documentation/x86/conf.py b/Documentation/x86/conf.py
new file mode 100644
index 0000000..33c5c31
--- /dev/null
+++ b/Documentation/x86/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "X86 architecture specific documentation"
+
+tags.add("subproject")
+
+latex_documents = [
+    ('index', 'x86.tex', project,
+     'The kernel development community', 'manual'),
+]
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
new file mode 100644
index 0000000..ef389dc
--- /dev/null
+++ b/Documentation/x86/index.rst
@@ -0,0 +1,8 @@
+==========================
+x86 architecture specifics
+==========================
+
+.. toctree::
+   :maxdepth: 1
+
+   mds
diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
new file mode 100644
index 0000000..5d4330b
--- /dev/null
+++ b/Documentation/x86/mds.rst
@@ -0,0 +1,193 @@
+Microarchitectural Data Sampling (MDS) mitigation
+=================================================
+
+.. _mds:
+
+Overview
+--------
+
+Microarchitectural Data Sampling (MDS) is a family of side channel attacks
+on internal buffers in Intel CPUs. The variants are:
+
+ - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126)
+ - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130)
+ - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127)
+ - Microarchitectural Data Sampling Uncacheable Memory (MDSUM) (CVE-2019-11091)
+
+MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a
+dependent load (store-to-load forwarding) as an optimization. The forward
+can also happen to a faulting or assisting load operation for a different
+memory address, which can be exploited under certain conditions. Store
+buffers are partitioned between Hyper-Threads so cross thread forwarding is
+not possible. But if a thread enters or exits a sleep state the store
+buffer is repartitioned which can expose data from one thread to the other.
+
+MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage
+L1 miss situations and to hold data which is returned or sent in response
+to a memory or I/O operation. Fill buffers can forward data to a load
+operation and also write data to the cache. When the fill buffer is
+deallocated it can retain the stale data of the preceding operations which
+can then be forwarded to a faulting or assisting load operation, which can
+be exploited under certain conditions. Fill buffers are shared between
+Hyper-Threads so cross thread leakage is possible.
+
+MLPDS leaks Load Port Data. Load ports are used to perform load operations
+from memory or I/O. The received data is then forwarded to the register
+file or a subsequent operation. In some implementations the Load Port can
+contain stale data from a previous operation which can be forwarded to
+faulting or assisting loads under certain conditions, which again can be
+exploited eventually. Load ports are shared between Hyper-Threads so cross
+thread leakage is possible.
+
+MDSUM is a special case of MSBDS, MFBDS and MLPDS. An uncacheable load from
+memory that takes a fault or assist can leave data in a microarchitectural
+structure that may later be observed using one of the same methods used by
+MSBDS, MFBDS or MLPDS.
+
+Exposure assumptions
+--------------------
+
+It is assumed that attack code resides in user space or in a guest, with one
+exception. The rationale behind this assumption is that the code construct
+needed for exploiting MDS requires:
+
+ - to control the load to trigger a fault or assist
+
+ - to have a disclosure gadget which exposes the speculatively accessed
+   data for consumption through a side channel.
+
+ - to control the pointer through which the disclosure gadget exposes the
+   data
+
+The existence of such a construct in the kernel cannot be excluded with
+100% certainty, but the complexity involved makes it extremely unlikely.
+
+There is one exception, which is untrusted BPF. The functionality of
+untrusted BPF is limited, but it needs to be thoroughly investigated
+whether it can be used to create such a construct.
+
+
+Mitigation strategy
+-------------------
+
+All variants have the same mitigation strategy at least for the single CPU
+thread case (SMT off): Force the CPU to clear the affected buffers.
+
+This is achieved by using the otherwise unused and obsolete VERW
+instruction in combination with a microcode update. The microcode clears
+the affected CPU buffers when the VERW instruction is executed.
+
+For virtualization there are two ways to achieve CPU buffer clearing: either
+the modified VERW instruction, or the L1D Flush command. The latter is issued
+when L1TF mitigation is enabled, so the extra VERW can be avoided. If the CPU
+is not affected by L1TF, then VERW needs to be issued.
+
+If the VERW instruction with the supplied segment selector argument is
+executed on a CPU without the microcode update, there is no side effect
+other than a small number of pointlessly wasted CPU cycles.
+
+This does not protect against cross Hyper-Thread attacks except for MSBDS
+which is only exploitable cross Hyper-thread when one of the Hyper-Threads
+enters a C-state.
+
+The kernel provides a function to invoke the buffer clearing:
+
+    mds_clear_cpu_buffers()
+
+The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
+(idle) transitions.
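+
+On current microcode this helper boils down to a single VERW instruction with
+a memory operand pointing at a valid, writable data segment selector; roughly
+(a sketch of the idea, not the exact kernel source):
+
+    static inline void mds_clear_cpu_buffers(void)
+    {
+            static const u16 ds = __KERNEL_DS;
+
+            /* The memory-operand form of VERW is what triggers the
+             * microcode-based buffer clearing. */
+            asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+    }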
+
+As a special quirk to address virtualization scenarios where the host has
+the microcode updated, but the hypervisor does not (yet) expose the
+MD_CLEAR CPUID bit to guests, the kernel issues the VERW instruction in the
+hope that it might actually clear the buffers. The state is reflected
+accordingly.
+
+According to current knowledge additional mitigations inside the kernel
+itself are not required because the necessary gadgets to expose the leaked
+data cannot be controlled in a way which allows exploitation from malicious
+user space or VM guests.
+
+Kernel internal mitigation modes
+--------------------------------
+
+ ======= ============================================================
+ off      Mitigation is disabled. Either the CPU is not affected or
+          mds=off is supplied on the kernel command line
+
+ full     Mitigation is enabled. CPU is affected and MD_CLEAR is
+          advertised in CPUID.
+
+ vmwerv   Mitigation is enabled. CPU is affected and MD_CLEAR is not
+          advertised in CPUID. That is mainly for virtualization
+          scenarios where the host has the updated microcode but the
+          hypervisor does not expose MD_CLEAR in CPUID. It's a best
+          effort approach without guarantee.
+ ======= ============================================================
+
+If the CPU is affected and mds=off is not supplied on the kernel command
+line then the kernel selects the appropriate mitigation mode depending on
+the availability of the MD_CLEAR CPUID bit.
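+
+In pseudo-code the selection is roughly (a sketch; ``cmdline_has()`` stands
+in for the actual command line parsing):
+
+    if (!boot_cpu_has_bug(X86_BUG_MDS) || cmdline_has("mds=off"))
+            mode = MDS_MITIGATION_OFF;
+    else if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+            mode = MDS_MITIGATION_FULL;
+    else
+            mode = MDS_MITIGATION_VMWERV;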
+
+Mitigation points
+-----------------
+
+1. Return to user space
+^^^^^^^^^^^^^^^^^^^^^^^
+
+   When transitioning from kernel to user space the CPU buffers are flushed
+   on affected CPUs when the mitigation is not disabled on the kernel
+   command line. The mitigation is enabled through the static key
+   mds_user_clear.
+
+   The mitigation is invoked in prepare_exit_to_usermode() which covers
+   all but one of the kernel to user space transitions.  The exception
+   is when we return from a Non Maskable Interrupt (NMI), which is
+   handled directly in do_nmi().
+
+   (The reason that NMI is special is that prepare_exit_to_usermode() can
+    enable IRQs.  In NMI context, NMIs are blocked, and we don't want to
+    enable IRQs with NMIs blocked.)
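+
+   In code terms the gate is roughly (a sketch):
+
+      static inline void mds_user_clear_cpu_buffers(void)
+      {
+              if (static_branch_likely(&mds_user_clear))
+                      mds_clear_cpu_buffers();
+      }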
+
+
+2. C-State transition
+^^^^^^^^^^^^^^^^^^^^^
+
+   When a CPU goes idle and enters a C-State the CPU buffers need to be
+   cleared on affected CPUs when SMT is active. This addresses the
+   repartitioning of the store buffer when one of the Hyper-Threads enters
+   a C-State.
+
+   When SMT is inactive, i.e. either the CPU does not support it or all
+   sibling threads are offline, CPU buffer clearing is not required.
+
+   The idle clearing is enabled on CPUs which are only affected by MSBDS
+   and not by any other MDS variant. The other MDS variants cannot be
+   protected against cross Hyper-Thread attacks because the Fill Buffer and
+   the Load Ports are shared. So on CPUs affected by other variants, the
+   idle clearing would be a window dressing exercise and is therefore not
+   activated.
+
+   The invocation is controlled by the static key mds_idle_clear which is
+   switched depending on the chosen mitigation mode and the SMT state of
+   the system.
+
+   The buffer clear is invoked only before entering the C-state, to prevent
+   stale data from the idling CPU from spilling to the Hyper-Thread sibling
+   after the store buffer has been repartitioned and all entries have become
+   available to the non-idle sibling.
+
+   When coming out of idle the store buffer is partitioned again so each
+   sibling has half of it available. The CPU coming back from idle could
+   then be speculatively exposed to the sibling's stale contents. The
+   buffers are flushed either on exit to user space or on VMENTER so
+   malicious code in user space or the guest cannot speculatively access
+   them.
+
+   The mitigation is hooked into all variants of halt()/mwait(), but does
+   not cover the legacy ACPI IO-Port mechanism, because the ACPI idle driver
+   was superseded by the intel_idle driver around 2010, and intel_idle is
+   preferred on all affected CPUs, which are expected to gain the MD_CLEAR
+   functionality in microcode. Aside from that, the IO-Port mechanism is a
+   legacy interface which is only used on older systems which are either
+   not affected or no longer receive microcode updates.
diff --git a/MAINTAINERS b/MAINTAINERS
index 34f509a..5ea427b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11081,6 +11081,13 @@
 F:	arch/arm/mach-s3c24xx/bast-ide.c
 F:	arch/arm/mach-s3c24xx/bast-irq.c
 
+SIPHASH PRF ROUTINES
+M:	Jason A. Donenfeld <Jason@zx2c4.com>
+S:	Maintained
+F:	lib/siphash.c
+F:	lib/test_siphash.c
+F:	include/linux/siphash.h
+
 TI DAVINCI MACHINE SUPPORT
 M:	Sekhar Nori <nsekhar@ti.com>
 M:	Kevin Hilman <khilman@kernel.org>
diff --git a/Makefile b/Makefile
index f5e6a01..5e75855 100755
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 163
+SUBLEVEL = 179
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -516,7 +516,7 @@
 ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y)
 $(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
 endif
-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
 CLANG_FLAGS	+= --prefix=$(GCC_TOOLCHAIN_DIR)
 GCC_TOOLCHAIN	:= $(realpath $(GCC_TOOLCHAIN_DIR)/..)
 endif
@@ -662,7 +662,9 @@
 
 KBUILD_CFLAGS	+= $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS	+= $(call cc-option,-fno-PIE)
-CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
+CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage \
+	$(call cc-option,-fno-tree-loop-im) \
+	$(call cc-disable-warning,maybe-uninitialized,)
 CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
 export CFLAGS_GCOV CFLAGS_KCOV
 
@@ -720,7 +722,7 @@
 endif
 
 ifdef CONFIG_CFI_CLANG
-cfi-clang-flags	+= -fsanitize=cfi
+cfi-clang-flags	+= -fsanitize=cfi $(call cc-option, -fsplit-lto-unit)
 DISABLE_CFI_CLANG := -fno-sanitize=cfi
 ifdef CONFIG_MODULES
 cfi-clang-flags	+= -fsanitize-cfi-cross-dso
@@ -747,8 +749,7 @@
 endif
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS	+= $(call cc-option,-Oz,-Os)
-KBUILD_CFLAGS	+= $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
 else
 ifdef CONFIG_PROFILE_ALL_BRANCHES
 KBUILD_CFLAGS	+= -O2 $(call cc-disable-warning,maybe-uninitialized,)
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 0684fd2..f82393f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -209,7 +209,7 @@
 		*/
 		  "=&r" (tmp), "+r" (to), "+r" (from)
 		:
-		: "lp_count", "lp_start", "lp_end", "memory");
+		: "lp_count", "memory");
 
 		return n;
 	}
@@ -438,7 +438,7 @@
 		 */
 		  "=&r" (tmp), "+r" (to), "+r" (from)
 		:
-		: "lp_count", "lp_start", "lp_end", "memory");
+		: "lp_count", "memory");
 
 		return n;
 	}
@@ -658,7 +658,7 @@
 	"	.previous			\n"
 	: "+r"(d_char), "+r"(res)
 	: "i"(0)
-	: "lp_count", "lp_start", "lp_end", "memory");
+	: "lp_count", "memory");
 
 	return res;
 }
@@ -691,7 +691,7 @@
 	"	.previous			\n"
 	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
 	: "g"(-EFAULT), "r"(count)
-	: "lp_count", "lp_start", "lp_end", "memory");
+	: "lp_count", "memory");
 
 	return res;
 }
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 1f945d0..208bf2c 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -107,6 +107,7 @@
 	;    r2 = pointer to uboot provided cmdline or external DTB in mem
 	; These are handled later in handle_uboot_args()
 	st	r0, [@uboot_tag]
+	st      r1, [@uboot_magic]
 	st	r2, [@uboot_arg]
 #endif
 
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 9119bea..9f96120 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -32,6 +32,7 @@
 
 /* Part of U-boot ABI: see head.S */
 int __initdata uboot_tag;
+int __initdata uboot_magic;
 char __initdata *uboot_arg;
 
 const struct machine_desc *machine_desc;
@@ -400,6 +401,8 @@
 #define UBOOT_TAG_NONE		0
 #define UBOOT_TAG_CMDLINE	1
 #define UBOOT_TAG_DTB		2
+/* We always pass 0 as magic from U-boot */
+#define UBOOT_MAGIC_VALUE	0
 
 void __init handle_uboot_args(void)
 {
@@ -415,6 +418,11 @@
 		goto ignore_uboot_args;
 	}
 
+	if (uboot_magic != UBOOT_MAGIC_VALUE) {
+		pr_warn(IGNORE_ARGS "non zero uboot magic\n");
+		goto ignore_uboot_args;
+	}
+
 	if (uboot_tag != UBOOT_TAG_NONE &&
             uboot_arg_invalid((unsigned long)uboot_arg)) {
 		pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index d61044d..ea14b0b 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -25,15 +25,11 @@
 #endif
 
 #ifdef CONFIG_ARC_HAS_LL64
-# define PREFETCH_READ(RX)	prefetch    [RX, 56]
-# define PREFETCH_WRITE(RX)	prefetchw   [RX, 64]
 # define LOADX(DST,RX)		ldd.ab	DST, [RX, 8]
 # define STOREX(SRC,RX)		std.ab	SRC, [RX, 8]
 # define ZOLSHFT		5
 # define ZOLAND			0x1F
 #else
-# define PREFETCH_READ(RX)	prefetch    [RX, 28]
-# define PREFETCH_WRITE(RX)	prefetchw   [RX, 32]
 # define LOADX(DST,RX)		ld.ab	DST, [RX, 4]
 # define STOREX(SRC,RX)		st.ab	SRC, [RX, 4]
 # define ZOLSHFT		4
@@ -41,8 +37,6 @@
 #endif
 
 ENTRY_CFI(memcpy)
-	prefetch [r1]		; Prefetch the read location
-	prefetchw [r0]		; Prefetch the write location
 	mov.f	0, r2
 ;;; if size is zero
 	jz.d	[blink]
@@ -72,8 +66,6 @@
 	lpnz	@.Lcopy32_64bytes
 	;; LOOP START
 	LOADX (r6, r1)
-	PREFETCH_READ (r1)
-	PREFETCH_WRITE (r3)
 	LOADX (r8, r1)
 	LOADX (r10, r1)
 	LOADX (r4, r1)
@@ -117,9 +109,7 @@
 	lpnz	@.Lcopy8bytes_1
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
-	prefetch [r1, 28]	;Prefetch the next read location
 	ld.ab	r8, [r1,4]
-	prefetchw [r3, 32]	;Prefetch the next write location
 
 	SHIFT_1	(r7, r6, 24)
 	or	r7, r7, r5
@@ -162,9 +152,7 @@
 	lpnz	@.Lcopy8bytes_2
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
-	prefetch [r1, 28]	;Prefetch the next read location
 	ld.ab	r8, [r1,4]
-	prefetchw [r3, 32]	;Prefetch the next write location
 
 	SHIFT_1	(r7, r6, 16)
 	or	r7, r7, r5
@@ -204,9 +192,7 @@
 	lpnz	@.Lcopy8bytes_3
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
-	prefetch [r1, 28]	;Prefetch the next read location
 	ld.ab	r8, [r1,4]
-	prefetchw [r3, 32]	;Prefetch the next write location
 
 	SHIFT_1	(r7, r6, 8)
 	or	r7, r7, r5
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 353563e..628715e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1502,6 +1502,7 @@
 	bool "Support for hot-pluggable CPUs"
 	depends on SMP
+	select GENERIC_IRQ_MIGRATION
 	help
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index 3f7d1b7..a17ca8d 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -17,7 +17,8 @@
 		@ there.
 		.inst	'M' | ('Z' << 8) | (0x1310 << 16)   @ tstne r0, #0x4d000
 #else
-		W(mov)	r0, r0
+ AR_CLASS(	mov	r0, r0		)
+  M_CLASS(	nop.w			)
 #endif
 		.endm
 
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 38ed62f..f1b4ea9 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1385,7 +1385,21 @@
 
 		@ Preserve return value of efi_entry() in r4
 		mov	r4, r0
-		bl	cache_clean_flush
+
+		@ our cache maintenance code relies on CP15 barrier instructions
+		@ but since we arrived here with the MMU and caches configured
+		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+		@ the enable path will be executed on v7+ only.
+		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
+		tst	r1, #(1 << 5)		@ CP15BEN bit set?
+		bne	0f
+		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
+		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
+ ARM(		.inst	0xf57ff06f		@ v7+ isb	)
+ THUMB(		isb						)
+
+0:		bl	cache_clean_flush
 		bl	cache_off
 
 		@ Set parameters for booting zImage according to boot protocol
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index 84df85e..7efde03 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -26,5 +26,5 @@
 };
 
 &hdmi {
-	hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+	hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
 };
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index e0280cac2..fed72a5 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -90,6 +90,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
+	phy-reset-duration = <10>; /* in msecs */
 	phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
 	phy-supply = <&vdd_eth_io_reg>;
 	status = "disabled";
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index b5841fa..0d20aad 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -230,7 +230,7 @@
 				status = "disabled";
 			};
 
-			i2s1: i2s@2009C000 {
+			i2s1: i2s@2009c000 {
 				compatible = "nxp,lpc3220-i2s";
 				reg = <0x2009C000 0x1000>;
 			};
@@ -273,7 +273,7 @@
 				status = "disabled";
 			};
 
-			i2c1: i2c@400A0000 {
+			i2c1: i2c@400a0000 {
 				compatible = "nxp,pnx-i2c";
 				reg = <0x400A0000 0x100>;
 				interrupt-parent = <&sic1>;
@@ -284,7 +284,7 @@
 				clocks = <&clk LPC32XX_CLK_I2C1>;
 			};
 
-			i2c2: i2c@400A8000 {
+			i2c2: i2c@400a8000 {
 				compatible = "nxp,pnx-i2c";
 				reg = <0x400A8000 0x100>;
 				interrupt-parent = <&sic1>;
@@ -295,7 +295,7 @@
 				clocks = <&clk LPC32XX_CLK_I2C2>;
 			};
 
-			mpwm: mpwm@400E8000 {
+			mpwm: mpwm@400e8000 {
 				compatible = "nxp,lpc3220-motor-pwm";
 				reg = <0x400E8000 0x78>;
 				status = "disabled";
@@ -394,7 +394,7 @@
 				#gpio-cells = <3>; /* bank, pin, flags */
 			};
 
-			timer4: timer@4002C000 {
+			timer4: timer@4002c000 {
 				compatible = "nxp,lpc3220-timer";
 				reg = <0x4002C000 0x1000>;
 				interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
@@ -412,7 +412,7 @@
 				status = "disabled";
 			};
 
-			watchdog: watchdog@4003C000 {
+			watchdog: watchdog@4003c000 {
 				compatible = "nxp,pnx4008-wdt";
 				reg = <0x4003C000 0x1000>;
 				clocks = <&clk LPC32XX_CLK_WDOG>;
@@ -451,7 +451,7 @@
 				status = "disabled";
 			};
 
-			timer1: timer@4004C000 {
+			timer1: timer@4004c000 {
 				compatible = "nxp,lpc3220-timer";
 				reg = <0x4004C000 0x1000>;
 				interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
@@ -475,14 +475,14 @@
 				status = "disabled";
 			};
 
-			pwm1: pwm@4005C000 {
+			pwm1: pwm@4005c000 {
 				compatible = "nxp,lpc3220-pwm";
 				reg = <0x4005C000 0x4>;
 				clocks = <&clk LPC32XX_CLK_PWM1>;
 				status = "disabled";
 			};
 
-			pwm2: pwm@4005C004 {
+			pwm2: pwm@4005c004 {
 				compatible = "nxp,lpc3220-pwm";
 				reg = <0x4005C004 0x4>;
 				clocks = <&clk LPC32XX_CLK_PWM2>;
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index 8b14c87..17524cf 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -16,9 +16,11 @@
 	sdxpoorwills-v2-cdp.dtb \
 	sdxpoorwills-v2-dualwifi-mtp.dtb \
 	sdxpoorwills-v2-dualwifi-cdp.dtb \
-	sdxpoorwills-adp.dtb \
-	sdxpoorwills-pcie-ep-adp.dtb \
-	sdxpoorwills-usb-ep-adp.dtb
+	sdxpoorwills-ccard.dtb \
+	sdxpoorwills-ccard-pcie-ep.dtb \
+	sdxpoorwills-ccard-usb-ep.dtb \
+	sdxpoorwills-v2-pcie-ep-mtp-256.dtb \
+	sdxpoorwills-v2-pcie-ep-mtp.dtb
 
 dtb-$(CONFIG_ARCH_MDM9650) += mdm9650-nand-mtp.dtb \
 	mdm9650-ttp.dtb \
diff --git a/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi b/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
index 8f7edab..08e306d 100644
--- a/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -55,19 +55,21 @@
 		pmxpoorwills_gpios: pinctrl@c000 {
 			compatible = "qcom,spmi-gpio";
 			reg = <0xc000 0x900>;
-			interrupts = <0x0 0xc1 0 IRQ_TYPE_NONE>,
+			interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>,
+					<0x0 0xc1 0 IRQ_TYPE_NONE>,
 					<0x0 0xc2 0 IRQ_TYPE_NONE>,
 					<0x0 0xc3 0 IRQ_TYPE_NONE>,
 					<0x0 0xc4 0 IRQ_TYPE_NONE>,
 					<0x0 0xc5 0 IRQ_TYPE_NONE>;
-			interrupt-names = "pmxpoorwills_gpio2",
+			interrupt-names = "pmxpoorwills_gpio1",
+					  "pmxpoorwills_gpio2",
 					  "pmxpoorwills_gpio3",
 					  "pmxpoorwills_gpio4",
 					  "pmxpoorwills_gpio5",
 					  "pmxpoorwills_gpio6";
 			gpio-controller;
 			#gpio-cells = <2>;
-			qcom,gpios-disallowed = <1 7 8 9>;
+			qcom,gpios-disallowed = <7 8 9>;
 		};
 
 		pmxpoorwills_rtc: qcom,pmxpoorwills_rtc {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-ccard-pcie-ep.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-ccard-pcie-ep.dts
new file mode 100644
index 0000000..0e712a1
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ccard-pcie-ep.dts
@@ -0,0 +1,50 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdxpoorwills-ccard.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SA415M CCARD PCIE-EP";
+	compatible = "qcom,sdxpoorwills-ccard",
+		"qcom,sdxpoorwills", "qcom,ccard";
+	qcom,board-id = <25 1>, <25 0x101>;
+};
+
+&usb {
+	/delete-property/ iommus;
+};
+
+&pcie_ep {
+	status = "okay";
+};
+
+&ipa_hw {
+	qcom,use-ipa-in-mhi-mode;
+};
+
+&cnss_pcie {
+	status = "disabled";
+};
+
+&pcie0 {
+	status = "disabled";
+};
+
+&mhi_device {
+	status = "okay";
+};
+
+&restart_pshold {
+	qcom,force-warm-reboot;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-adp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-ccard-usb-ep.dts
similarity index 73%
rename from arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-adp.dts
rename to arch/arm/boot/dts/qcom/sdxpoorwills-ccard-usb-ep.dts
index 0fb75a3..9f0764e 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-adp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ccard-usb-ep.dts
@@ -12,15 +12,11 @@
 
 /dts-v1/;
 
-#include "sdxpoorwills-adp.dtsi"
+#include "sdxpoorwills-ccard.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDXPOORWILLS PCIE-EP ADP";
-	compatible = "qcom,sdxpoorwills-adp",
-		"qcom,sdxpoorwills", "qcom,adp";
+	model = "Qualcomm Technologies, Inc. SA415M CCARD USB-EP";
+	compatible = "qcom,sdxpoorwills-ccard",
+		"qcom,sdxpoorwills", "qcom,ccard";
 	qcom,board-id = <25 2>, <25 0x102>;
 };
-
-&blsp1_uart2b_hs {
-	status = "okay";
-};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-adp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-ccard.dts
similarity index 77%
rename from arch/arm/boot/dts/qcom/sdxpoorwills-adp.dts
rename to arch/arm/boot/dts/qcom/sdxpoorwills-ccard.dts
index 369929b..b9acf1c 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-adp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ccard.dts
@@ -12,11 +12,11 @@
 
 /dts-v1/;
 
-#include "sdxpoorwills-adp.dtsi"
+#include "sdxpoorwills-ccard.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDXPOORWILLS ADP";
-	compatible = "qcom,sdxpoorwills-adp",
-		"qcom,sdxpoorwills", "qcom,adp";
+	model = "Qualcomm Technologies, Inc. SA415M CCARD";
+	compatible = "qcom,sdxpoorwills-ccard",
+		"qcom,sdxpoorwills", "qcom,ccard";
 	qcom,board-id = <25 0>, <25 0x100>;
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-adp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-ccard.dtsi
similarity index 61%
rename from arch/arm/boot/dts/qcom/sdxpoorwills-adp.dtsi
rename to arch/arm/boot/dts/qcom/sdxpoorwills-ccard.dtsi
index 683c30f..0ad0e0b 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ccard.dtsi
@@ -10,7 +10,12 @@
  * GNU General Public License for more details.
  */
 
-#include "sdxpoorwills-mtp.dtsi"
+#include "sdxpoorwills-ttp.dtsi"
+#include "sdxpoorwills-v2.dtsi"
+
+&blsp1_uart2b_hs {
+	status = "okay";
+};
 
 &qcom_seecom {
 	status = "okay";
@@ -31,3 +36,37 @@
 &mss_mem {
 	reg = <0x86400000 0x9300000>;
 };
+
+&i2c_4 {
+	asm330@6a {
+		reg = <0x6b>;
+	};
+};
+
+&wcd934x_cdc {
+	status = "disabled";
+};
+
+&wcd9xxx_intc {
+	status = "disabled";
+};
+
+&wcd_rst_gpio {
+	status = "disabled";
+};
+
+&snd_934x {
+	status = "disabled";
+};
+
+&smb138x {
+	status = "disabled";
+};
+
+&smb138x_vbus {
+	status = "disabled";
+};
+
+&vreg_wlan {
+	gpio = <&tlmm 81 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-usb-ep-adp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-usb-ep-adp.dts
deleted file mode 100644
index 1158fd7..0000000
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-usb-ep-adp.dts
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-
-#include "sdxpoorwills-adp.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. SDXPOORWILLS USB-EP ADP";
-	compatible = "qcom,sdxpoorwills-adp",
-		"qcom,sdxpoorwills", "qcom,adp";
-	qcom,board-id = <25 1>, <25 0x101>;
-};
-
-&blsp1_uart2b_hs {
-	status = "okay";
-};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-adp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-v2-pcie-ep-mtp-256.dts
similarity index 70%
copy from arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-adp.dts
copy to arch/arm/boot/dts/qcom/sdxpoorwills-v2-pcie-ep-mtp-256.dts
index 0fb75a3..88a42b4 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-adp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-v2-pcie-ep-mtp-256.dts
@@ -12,13 +12,14 @@
 
 /dts-v1/;
 
-#include "sdxpoorwills-adp.dtsi"
+#include "sdxpoorwills-pcie-ep-mtp.dtsi"
+#include "sdxpoorwills-v2.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDXPOORWILLS PCIE-EP ADP";
-	compatible = "qcom,sdxpoorwills-adp",
-		"qcom,sdxpoorwills", "qcom,adp";
-	qcom,board-id = <25 2>, <25 0x102>;
+	model = "Qualcomm Technologies, Inc. SDXPOORWILLS(M) V2 PCIE-EP MTP";
+	compatible = "qcom,sdxpoorwills-mtp",
+		"qcom,sdxpoorwills", "qcom,mtp";
+	qcom,board-id = <8 0x5>;
 };
 
 &blsp1_uart2b_hs {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-adp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-v2-pcie-ep-mtp.dts
similarity index 70%
copy from arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-adp.dts
copy to arch/arm/boot/dts/qcom/sdxpoorwills-v2-pcie-ep-mtp.dts
index 0fb75a3..2ac6605 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-adp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-v2-pcie-ep-mtp.dts
@@ -12,13 +12,14 @@
 
 /dts-v1/;
 
-#include "sdxpoorwills-adp.dtsi"
+#include "sdxpoorwills-pcie-ep-mtp.dtsi"
+#include "sdxpoorwills-v2.dtsi"
 
 / {
-	model = "Qualcomm Technologies, Inc. SDXPOORWILLS PCIE-EP ADP";
-	compatible = "qcom,sdxpoorwills-adp",
-		"qcom,sdxpoorwills", "qcom,adp";
-	qcom,board-id = <25 2>, <25 0x102>;
+	model = "Qualcomm Technologies, Inc. SDXPOORWILLS V2 PCIE-EP MTP";
+	compatible = "qcom,sdxpoorwills-mtp",
+		"qcom,sdxpoorwills", "qcom,mtp";
+	qcom,board-id = <8 0x105>;
 };
 
 &blsp1_uart2b_hs {
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-v2.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-v2.dtsi
index eea825f..e7f9a72 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-v2.dtsi
@@ -14,7 +14,7 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDXPOORWILLS V2";
 	compatible = "qcom,sdxpoorwills";
-	qcom,msm-id = <334 0x20000>, <335 0x20000>;
+	qcom,msm-id = <334 0x20000>, <335 0x20000>, <408 0x20000>;
 };
 
 &emac_hw {
@@ -107,3 +107,68 @@
 			0x0800 0x00 0x0
 			0x0844 0x03 0x0>;
 };
+
+&pcie_ep {
+	qcom,pcie-phy-ver = <2112>;
+
+	qcom,phy-init = <0x840 0x001 0x0 0x1
+			0x094 0x000 0x0 0x1
+			0x058 0x00f 0x0 0x1
+			0x0a4 0x042 0x0 0x1
+			0x110 0x024 0x0 0x1
+			0x1bc 0x011 0x0 0x1
+			0x0bc 0x019 0x0 0x1
+			0x0b0 0x004 0x0 0x1
+			0x0ac 0x0ff 0x0 0x1
+			0x158 0x001 0x0 0x1
+			0x074 0x036 0x0 0x1
+			0x07c 0x012 0x0 0x1
+			0x084 0x000 0x0 0x1
+			0x1b0 0x01d 0x0 0x1
+			0x1ac 0x056 0x0 0x1
+			0x04c 0x007 0x0 0x1
+			0x050 0x007 0x0 0x1
+			0x0f0 0x001 0x0 0x1
+			0x0ec 0x0fb 0x0 0x1
+			0x00c 0x002 0x0 0x1
+			0x29c 0x012 0x0 0x1
+			0x284 0x005 0x0 0x1
+			0x51c 0x003 0x0 0x1
+			0x518 0x01c 0x0 0x1
+			0x524 0x014 0x0 0x1
+			0x4ec 0x00e 0x0 0x1
+			0x4f0 0x04a 0x0 0x1
+			0x4f4 0x00f 0x0 0x1
+			0x5b4 0x004 0x0 0x1
+			0x434 0x07f 0x0 0x1
+			0x444 0x070 0x0 0x1
+			0x510 0x017 0x0 0x1
+			0x4d8 0x001 0x0 0x1
+			0x598 0x0d4 0x0 0x1
+			0x59c 0x054 0x0 0x1
+			0x5a0 0x0db 0x0 0x1
+			0x5a4 0x0b9 0x0 0x1
+			0x5a8 0x031 0x0 0x1
+			0x584 0x024 0x0 0x1
+			0x588 0x0e4 0x0 0x1
+			0x58c 0x0ec 0x0 0x1
+			0x590 0x0b9 0x0 0x1
+			0x594 0x036 0x0 0x1
+			0x570 0x0ef 0x0 0x1
+			0x574 0x0ef 0x0 0x1
+			0x578 0x02f 0x0 0x1
+			0x57c 0x0d3 0x0 0x1
+			0x580 0x040 0x0 0x1
+			0x4fc 0x000 0x0 0x1
+			0x4f8 0x0c0 0x0 0x1
+			0x414 0x004 0x0 0x1
+			0x9a4 0x001 0x0 0x1
+			0xc40 0x001 0x0 0x1
+			0xc48 0x001 0x0 0x1
+			0x988 0x066 0x0 0x1
+			0x998 0x008 0x0 0x1
+			0x8dc 0x00d 0x0 0x1
+			0x9ec 0x001 0x0 0x1
+			0x800 0x000 0x0 0x1
+			0x844 0x003 0x0 0x1>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 537e412..fa37005 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -510,6 +510,16 @@
 		status = "disabled";
 	};
 
+	qcom,ipc_router_mhi_dev_xprt {
+		compatible = "qcom,ipc-router-mhi-dev-xprt";
+		qcom,out-chan-id = <20>;
+		qcom,in-chan-id = <21>;
+		qcom,xprt-remote = "external-ap";
+		qcom,xprt-linkid = <2>;
+		qcom,xprt-version = <1>;
+		status = "disabled";
+	};
+
 	gdsc_emac: qcom,gdsc@147004 {
 		compatible = "qcom,gdsc";
 		regulator-name = "gdsc_emac";
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
index 8a394f3..ee65702 100644
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -517,7 +517,7 @@
 #define PIN_PC9__GPIO			PINMUX_PIN(PIN_PC9, 0, 0)
 #define PIN_PC9__FIQ			PINMUX_PIN(PIN_PC9, 1, 3)
 #define PIN_PC9__GTSUCOMP		PINMUX_PIN(PIN_PC9, 2, 1)
-#define PIN_PC9__ISC_D0			PINMUX_PIN(PIN_PC9, 2, 1)
+#define PIN_PC9__ISC_D0			PINMUX_PIN(PIN_PC9, 3, 1)
 #define PIN_PC9__TIOA4			PINMUX_PIN(PIN_PC9, 4, 2)
 #define PIN_PC10			74
 #define PIN_PC10__GPIO			PINMUX_PIN(PIN_PC10, 0, 0)
diff --git a/arch/arm/configs/msm8909w-perf_defconfig b/arch/arm/configs/msm8909w-perf_defconfig
index 557e5c3..7d7278a 100755
--- a/arch/arm/configs/msm8909w-perf_defconfig
+++ b/arch/arm/configs/msm8909w-perf_defconfig
@@ -20,7 +20,6 @@
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
-CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_SCHED_TUNE=y
diff --git a/arch/arm/configs/msm8909w_defconfig b/arch/arm/configs/msm8909w_defconfig
index 07a287e..9a7f92e 100755
--- a/arch/arm/configs/msm8909w_defconfig
+++ b/arch/arm/configs/msm8909w_defconfig
@@ -22,7 +22,6 @@
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
-CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_SCHED_TUNE=y
diff --git a/arch/arm/configs/ranchu_defconfig b/arch/arm/configs/ranchu_defconfig
index e217631..f59f38c 100644
--- a/arch/arm/configs/ranchu_defconfig
+++ b/arch/arm/configs/ranchu_defconfig
@@ -185,7 +185,6 @@
 CONFIG_TABLET_USB_HANWANG=y
 CONFIG_TABLET_USB_KBTAB=y
 CONFIG_INPUT_MISC=y
-CONFIG_INPUT_KEYCHORD=y
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_GPIO=y
 # CONFIG_SERIO_SERPORT is not set
diff --git a/arch/arm/configs/sdxpoorwills-auto-perf_defconfig b/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
index f8a24a2..6d87a92 100644
--- a/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
@@ -64,6 +64,7 @@
 CONFIG_IPV6_MROUTE=y
 CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IPV6_PIMSM_V2=y
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
 CONFIG_NETFILTER=y
 CONFIG_NETFILTER_DEBUG=y
 CONFIG_NF_CONNTRACK=y
@@ -299,6 +300,7 @@
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TLV320AIC3X=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_ELECOM=y
@@ -335,6 +337,7 @@
 CONFIG_USB_GADGET_VBUS_DRAW=500
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ECM=y
 CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
@@ -404,6 +407,7 @@
 CONFIG_QTI_RPMH_API=y
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_IPC_ROUTER_USB_XPRT=y
+CONFIG_MSM_IPC_ROUTER_MHI_DEV_XPRT=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
diff --git a/arch/arm/configs/sdxpoorwills-auto_defconfig b/arch/arm/configs/sdxpoorwills-auto_defconfig
index b2c0ca7..d5544bd 100644
--- a/arch/arm/configs/sdxpoorwills-auto_defconfig
+++ b/arch/arm/configs/sdxpoorwills-auto_defconfig
@@ -66,6 +66,7 @@
 CONFIG_IPV6_MROUTE=y
 CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IPV6_PIMSM_V2=y
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
 CONFIG_NETFILTER=y
 CONFIG_NETFILTER_DEBUG=y
 CONFIG_NF_CONNTRACK=y
@@ -300,6 +301,7 @@
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TLV320AIC3X=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_ELECOM=y
@@ -337,6 +339,7 @@
 CONFIG_USB_GADGET_VBUS_DRAW=500
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ECM=y
 CONFIG_USB_CONFIGFS_RNDIS=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
@@ -410,6 +413,7 @@
 CONFIG_QTI_RPMH_API=y
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_IPC_ROUTER_USB_XPRT=y
+CONFIG_MSM_IPC_ROUTER_MHI_DEV_XPRT=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
@@ -417,6 +421,7 @@
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_SMCINVOKE=y
 CONFIG_MSM_PM=y
 CONFIG_QCOM_DCC_V2=y
 CONFIG_QTI_RPM_STATS_LOG=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index fd6868c..ecf6627 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -66,6 +66,7 @@
 CONFIG_IPV6_MROUTE=y
 CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IPV6_PIMSM_V2=y
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
 CONFIG_NETFILTER=y
 CONFIG_NETFILTER_DEBUG=y
 CONFIG_NF_CONNTRACK=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 2b02a48..f892b1a 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -68,6 +68,7 @@
 CONFIG_IPV6_MROUTE=y
 CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IPV6_PIMSM_V2=y
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
 CONFIG_NETFILTER=y
 CONFIG_NETFILTER_DEBUG=y
 CONFIG_NF_CONNTRACK=y
diff --git a/arch/arm/configs/spyro-perf_defconfig b/arch/arm/configs/spyro-perf_defconfig
index 5a2e6c7..cfefe78 100644
--- a/arch/arm/configs/spyro-perf_defconfig
+++ b/arch/arm/configs/spyro-perf_defconfig
@@ -305,6 +305,7 @@
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_QTI_HAPTICS=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
@@ -359,6 +360,7 @@
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_CPR=y
 CONFIG_REGULATOR_CPR4_APSS=y
 CONFIG_REGULATOR_CPRH_KBSS=y
@@ -548,6 +550,7 @@
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
 CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
 CONFIG_PWM=y
 CONFIG_QTI_MPM=y
 CONFIG_ANDROID=y
diff --git a/arch/arm/configs/spyro_defconfig b/arch/arm/configs/spyro_defconfig
index 2f84f60..421e21c 100644
--- a/arch/arm/configs/spyro_defconfig
+++ b/arch/arm/configs/spyro_defconfig
@@ -312,6 +312,7 @@
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_QTI_HAPTICS=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
@@ -368,6 +369,7 @@
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_CPR=y
 CONFIG_REGULATOR_CPR4_APSS=y
 CONFIG_REGULATOR_CPRH_KBSS=y
@@ -567,6 +569,7 @@
 CONFIG_SPDM_SCM=y
 CONFIG_DEVFREQ_SPDM=y
 CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
 CONFIG_PWM=y
 CONFIG_QCOM_SHOW_RESUME_IRQ=y
 CONFIG_QTI_MPM=y
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 5d934a0..cb2486a 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -265,6 +265,8 @@
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+	if (err)
+		return err;
 
 	/* generate the initial tweak */
 	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
@@ -289,6 +291,8 @@
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+	if (err)
+		return err;
 
 	/* generate the initial tweak */
 	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
index fac0533..f64e841 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -205,10 +205,11 @@
 .global	sha256_block_data_order
 .type	sha256_block_data_order,%function
 sha256_block_data_order:
+.Lsha256_block_data_order:
 #if __ARM_ARCH__<7
 	sub	r3,pc,#8		@ sha256_block_data_order
 #else
-	adr	r3,sha256_block_data_order
+	adr	r3,.Lsha256_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
 	ldr	r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
index 555a1a8..72c2480 100644
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -86,10 +86,11 @@
 .global	sha256_block_data_order
 .type	sha256_block_data_order,%function
 sha256_block_data_order:
+.Lsha256_block_data_order:
 #if __ARM_ARCH__<7
 	sub	r3,pc,#8		@ sha256_block_data_order
 #else
-	adr	r3,sha256_block_data_order
+	adr	r3,.Lsha256_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
 	ldr	r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
index a2b11a8..5fe3364 100644
--- a/arch/arm/crypto/sha512-armv4.pl
+++ b/arch/arm/crypto/sha512-armv4.pl
@@ -267,10 +267,11 @@
 .global	sha512_block_data_order
 .type	sha512_block_data_order,%function
 sha512_block_data_order:
+.Lsha512_block_data_order:
 #if __ARM_ARCH__<7
 	sub	r3,pc,#8		@ sha512_block_data_order
 #else
-	adr	r3,sha512_block_data_order
+	adr	r3,.Lsha512_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
 	ldr	r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
index 3694c4d..de9bd7f 100644
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ b/arch/arm/crypto/sha512-core.S_shipped
@@ -134,10 +134,11 @@
 .global	sha512_block_data_order
 .type	sha512_block_data_order,%function
 sha512_block_data_order:
+.Lsha512_block_data_order:
 #if __ARM_ARCH__<7
 	sub	r3,pc,#8		@ sha512_block_data_order
 #else
-	adr	r3,sha512_block_data_order
+	adr	r3,.Lsha512_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
 	ldr	r12,.LOPENSSL_armcap
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 513e03d..8331cb0 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -10,6 +10,8 @@
 #define sev()	__asm__ __volatile__ ("sev" : : : "memory")
 #define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
 #define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
+#else
+#define wfe()	do { } while (0)
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index e53638c..61e1d08 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -24,7 +24,6 @@
 #ifndef __ASSEMBLY__
 struct irqaction;
 struct pt_regs;
-extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index b2c5d79..f59a196 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -80,7 +80,11 @@
 unsigned long get_wchan(struct task_struct *p);
 
 #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
-#define cpu_relax()			smp_mb()
+#define cpu_relax()						\
+	do {							\
+		smp_mb();					\
+		__asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;");	\
+	} while (0)
 #else
 #define cpu_relax()			barrier()
 #endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
old mode 100644
new mode 100755
index 42d3974..407527f
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index b18c1ea..ef6b27f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -87,8 +87,11 @@
 
 	set_cpu_online(smp_processor_id(), false);
 	atomic_dec(&waiting_for_crash_ipi);
-	while (1)
+
+	while (1) {
 		cpu_relax();
+		wfe();
+	}
 }
 
 static void machine_kexec_mask_interrupts(void)
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 69bda1a..1f665ac 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -15,7 +15,7 @@
 	unsigned int insn;
 };
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
 	__acquires(&patch_lock)
@@ -32,7 +32,7 @@
 		return addr;
 
 	if (flags)
-		spin_lock_irqsave(&patch_lock, *flags);
+		raw_spin_lock_irqsave(&patch_lock, *flags);
 	else
 		__acquire(&patch_lock);
 
@@ -47,7 +47,7 @@
 	clear_fixmap(fixmap);
 
 	if (flags)
-		spin_unlock_irqrestore(&patch_lock, *flags);
+		raw_spin_unlock_irqrestore(&patch_lock, *flags);
 	else
 		__release(&patch_lock);
 }
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b7a75a0..9316300429 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -254,7 +254,7 @@
 	/*
 	 * OK - migrate IRQs away from this CPU
 	 */
-	migrate_irqs();
+	irq_migrate_all_off_this_cpu();
 
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
@@ -615,8 +615,10 @@
 	local_fiq_disable();
 	local_irq_disable();
 
-	while (1)
+	while (1) {
 		cpu_relax();
+		wfe();
+	}
 }
 
 static DEFINE_PER_CPU(struct completion *, cpu_completion);
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 0bee233..314cfb2 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -93,7 +93,7 @@
 static const struct unwind_idx *__origin_unwind_idx;
 extern const struct unwind_idx __stop_unwind_idx[];
 
-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
 
 /* Convert a prel31 symbol to an absolute address */
@@ -201,7 +201,7 @@
 		/* module unwind tables */
 		struct unwind_table *table;
 
-		spin_lock_irqsave(&unwind_lock, flags);
+		raw_spin_lock_irqsave(&unwind_lock, flags);
 		list_for_each_entry(table, &unwind_tables, list) {
 			if (addr >= table->begin_addr &&
 			    addr < table->end_addr) {
@@ -213,7 +213,7 @@
 				break;
 			}
 		}
-		spin_unlock_irqrestore(&unwind_lock, flags);
+		raw_spin_unlock_irqrestore(&unwind_lock, flags);
 	}
 
 	pr_debug("%s: idx = %p\n", __func__, idx);
@@ -529,9 +529,9 @@
 	tab->begin_addr = text_addr;
 	tab->end_addr = text_addr + text_size;
 
-	spin_lock_irqsave(&unwind_lock, flags);
+	raw_spin_lock_irqsave(&unwind_lock, flags);
 	list_add_tail(&tab->list, &unwind_tables);
-	spin_unlock_irqrestore(&unwind_lock, flags);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);
 
 	return tab;
 }
@@ -543,9 +543,9 @@
 	if (!tab)
 		return;
 
-	spin_lock_irqsave(&unwind_lock, flags);
+	raw_spin_lock_irqsave(&unwind_lock, flags);
 	list_del(&tab->list);
-	spin_unlock_irqrestore(&unwind_lock, flags);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);
 
 	kfree(tab);
 }
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index a670c70..dfc00a5 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -801,7 +801,7 @@
 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			       const struct kvm_vcpu_init *init)
 {
-	unsigned int i;
+	unsigned int i, ret;
 	int phys_target = kvm_target_cpu();
 
 	if (init->target != phys_target)
@@ -836,9 +836,14 @@
 	vcpu->arch.target = phys_target;
 
 	/* Now we know what it is, we can reset it. */
-	return kvm_reset_vcpu(vcpu);
-}
+	ret = kvm_reset_vcpu(vcpu);
+	if (ret) {
+		vcpu->arch.target = -1;
+		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+	}
 
+	return ret;
+}
 
 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 					 struct kvm_vcpu_init *init)
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 27f4d962..b3ecffb 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -38,7 +38,7 @@
 $(obj)/csumpartialcopyuser.o:	$(obj)/csumpartialcopygeneric.S
 
 ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
-  NEON_FLAGS			:= -mfloat-abi=softfp -mfpu=neon
+  NEON_FLAGS			:= -march=armv7-a -mfloat-abi=softfp -mfpu=neon
   CFLAGS_xor-neon.o		+= $(NEON_FLAGS)
   obj-$(CONFIG_XOR_BLOCKS)	+= xor-neon.o
 endif
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
index 2c40aea..c691b90 100644
--- a/arch/arm/lib/xor-neon.c
+++ b/arch/arm/lib/xor-neon.c
@@ -14,7 +14,7 @@
 MODULE_LICENSE("GPL");
 
 #ifndef __ARM_NEON__
-#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
+#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
 #endif
 
 /*
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index fd6da54..2199c3a 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -205,6 +205,7 @@
 		return;
 
 	addr = of_get_address(nd, 0, NULL, NULL);
+	of_node_put(nd);
 	if (!addr) {
 		pr_err("%s: No address specified.\n", __func__);
 		return;
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 3e1430a..81c935c 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -715,8 +715,10 @@
 
 	if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
 		pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+		of_node_put(np);
 		return;
 	}
+	of_node_put(np);
 
 	pm_data = (const struct exynos_pm_data *) match->data;
 
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index bfeb25a..326e870 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -16,30 +16,23 @@
 #include "cpuidle.h"
 #include "hardware.h"
 
-static atomic_t master = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(master_lock);
+static int num_idle_cpus = 0;
+static DEFINE_SPINLOCK(cpuidle_lock);
 
 static int imx6q_enter_wait(struct cpuidle_device *dev,
 			    struct cpuidle_driver *drv, int index)
 {
-	if (atomic_inc_return(&master) == num_online_cpus()) {
-		/*
-		 * With this lock, we prevent other cpu to exit and enter
-		 * this function again and become the master.
-		 */
-		if (!spin_trylock(&master_lock))
-			goto idle;
+	spin_lock(&cpuidle_lock);
+	if (++num_idle_cpus == num_online_cpus())
 		imx6_set_lpm(WAIT_UNCLOCKED);
-		cpu_do_idle();
-		imx6_set_lpm(WAIT_CLOCKED);
-		spin_unlock(&master_lock);
-		goto done;
-	}
+	spin_unlock(&cpuidle_lock);
 
-idle:
 	cpu_do_idle();
-done:
-	atomic_dec(&master);
+
+	spin_lock(&cpuidle_lock);
+	if (num_idle_cpus-- == num_online_cpus())
+		imx6_set_lpm(WAIT_CLOCKED);
+	spin_unlock(&cpuidle_lock);
 
 	return index;
 }
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 53c316f..fe4932f 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -300,7 +300,7 @@
 	}
 };
 
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
 	.hw_id = 0,
 	.pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@
 	.resource = iop13xx_adma_0_resources,
 	.dev = {
 		.dma_mask = &iop13xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop13xx_adma_0_data,
 	},
 };
@@ -336,7 +336,7 @@
 	.resource = iop13xx_adma_1_resources,
 	.dev = {
 		.dma_mask = &iop13xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop13xx_adma_1_data,
 	},
 };
@@ -348,7 +348,7 @@
 	.resource = iop13xx_adma_2_resources,
 	.dev = {
 		.dma_mask = &iop13xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop13xx_adma_2_data,
 	},
 };
diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
index db511ec..116feb6 100644
--- a/arch/arm/mach-iop13xx/tpmi.c
+++ b/arch/arm/mach-iop13xx/tpmi.c
@@ -152,7 +152,7 @@
 	}
 };
 
-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
 static struct platform_device iop13xx_tpmi_0_device = {
 	.name = "iop-tpmi",
 	.id = 0,
@@ -160,7 +160,7 @@
 	.resource = iop13xx_tpmi_0_resources,
 	.dev = {
 		.dma_mask          = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -171,7 +171,7 @@
 	.resource = iop13xx_tpmi_1_resources,
 	.dev = {
 		.dma_mask          = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -182,7 +182,7 @@
 	.resource = iop13xx_tpmi_2_resources,
 	.dev = {
 		.dma_mask          = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -193,7 +193,7 @@
 	.resource = iop13xx_tpmi_3_resources,
 	.dev = {
 		.dma_mask          = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 70b3eaf..5ca7e29 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -115,6 +115,7 @@
 	u32 enable_mask, enable_shift;
 	u32 pipd_mask, pipd_shift;
 	u32 reg;
+	int ret;
 
 	if (dsi_id == 0) {
 		enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@@ -130,7 +131,11 @@
 		return -ENODEV;
 	}
 
-	regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);
+	ret = regmap_read(omap4_dsi_mux_syscon,
+					  OMAP4_DSIPHY_SYSCON_OFFSET,
+					  &reg);
+	if (ret)
+		return ret;
 
 	reg &= ~enable_mask;
 	reg &= ~pipd_mask;
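
Note: the point of the change above is that regmap_read() can fail (for example on an unreachable syscon), in which case the output parameter is never written; masking and writing back that value would propagate garbage. The general shape of the check, as a sketch:

	unsigned int reg;
	int ret;

	ret = regmap_read(map, OFFSET, &reg);
	if (ret)
		return ret;	/* 'reg' is only valid when the read succeeded */
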
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index f1ca947..9e14604 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -533,8 +533,10 @@
 
 	prm_ll_data->reset_system();
 
-	while (1)
+	while (1) {
 		cpu_relax();
+		wfe();
+	}
 }
 
 /**
diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
index 262ab07..f4fdfca 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
@@ -70,16 +70,16 @@
 
 	switch (val) {
 	case CPUFREQ_PRECHANGE:
-		if (old_dvs & !new_dvs ||
-		    cur_dvs & !new_dvs) {
+		if ((old_dvs && !new_dvs) ||
+		    (cur_dvs && !new_dvs)) {
 			pr_debug("%s: exiting dvs\n", __func__);
 			cur_dvs = false;
 			gpio_set_value(OSIRIS_GPIO_DVS, 1);
 		}
 		break;
 	case CPUFREQ_POSTCHANGE:
-		if (!old_dvs & new_dvs ||
-		    !cur_dvs & new_dvs) {
+		if ((!old_dvs && new_dvs) ||
+		    (!cur_dvs && new_dvs)) {
 			pr_debug("entering dvs\n");
 			cur_dvs = true;
 			gpio_set_value(OSIRIS_GPIO_DVS, 0);
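
Note: the conditionals above mixed bitwise '&' with logical negation. With bool operands the result happens to come out the same, but the expressions state the wrong intent and break as soon as a flag becomes a multi-bit value. A short illustration with hypothetical flags:

	int a = 2, b = 1;	/* both logically "true" */
	int r1, r2;

	r1 = a & b;	/* == 0: 0b10 and 0b01 share no set bits    */
	r2 = a && b;	/* == 1: both operands are non-zero         */

The patched code wants the second meaning, hence the parenthesized '&&' forms.
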
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index a4d1f8d..d961222 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -143,7 +143,7 @@
 	.resource = iop3xx_dma_0_resources,
 	.dev = {
 		.dma_mask = &iop3xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop3xx_dma_0_data,
 	},
 };
@@ -155,7 +155,7 @@
 	.resource = iop3xx_dma_1_resources,
 	.dev = {
 		.dma_mask = &iop3xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop3xx_dma_1_data,
 	},
 };
@@ -167,7 +167,7 @@
 	.resource = iop3xx_aau_resources,
 	.dev = {
 		.dma_mask = &iop3xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop3xx_aau_data,
 	},
 };
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 272f49b..bb29e6e 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -605,7 +605,7 @@
 	.resource	= orion_xor0_shared_resources,
 	.dev            = {
 		.dma_mask               = &orion_xor_dmamask,
-		.coherent_dma_mask      = DMA_BIT_MASK(64),
+		.coherent_dma_mask      = DMA_BIT_MASK(32),
 		.platform_data          = &orion_xor0_pdata,
 	},
 };
@@ -666,7 +666,7 @@
 	.resource	= orion_xor1_shared_resources,
 	.dev            = {
 		.dma_mask               = &orion_xor_dmamask,
-		.coherent_dma_mask      = DMA_BIT_MASK(64),
+		.coherent_dma_mask      = DMA_BIT_MASK(32),
 		.platform_data          = &orion_xor1_pdata,
 	},
 };
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index e8229b9..3265b8f 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -258,7 +258,7 @@
 
 config SAMSUNG_PM_CHECK
 	bool "S3C2410 PM Suspend Memory CRC"
-	depends on PM
+	depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210)
 	select CRC32
 	help
 	  Enable the PM code's memory area checksum over sleep. This option
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2d53e26..bca2c3c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1264,6 +1264,10 @@
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 endmenu
 
 menu "Power management options"
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm660-pm.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm660-pm.dtsi
index dc2e850..bd493fd 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-pm660-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm660-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -272,6 +272,11 @@
 		};
 	};
 
+	qcom,lpm-workarounds {
+		compatible = "qcom,lpm-workarounds";
+		qcom,lpm-wa-skip-l2-spm;
+	};
+
 	qcom,pm@8600664 {
 		compatible = "qcom,pm";
 		reg = <0x8600664 0x40>;
@@ -286,6 +291,10 @@
 		qcom,synced-clocks;
 	};
 
+	qcom,cpu-sleep-status {
+		compatible = "qcom,cpu-sleep-status";
+	};
+
 	qcom,rpm-stats@29dba0 {
 		compatible = "qcom,rpm-stats";
 		reg = <0x29dba0 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
index f07069c..94cc560 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, 2018-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -480,6 +480,58 @@
 				};
 			};
 		};
+		pmx_quat_mi2s {
+			label = "quat_mi2s";
+			quat_mi2s_active: quat_mi2s_active {
+				mux {
+					pins = "gpio94", "gpio95";
+					function = "sec_mi2s";
+				};
+				configs {
+					pins = "gpio94", "gpio95";
+					drive-strength = <8>;   /* 8 MA */
+					bias-disable;           /* No PULL */
+				};
+			};
+			quat_mi2s_sleep: quat_mi2s_sleep {
+				mux {
+					pins = "gpio94", "gpio95";
+					function = "sec_mi2s";
+				};
+				configs {
+					pins = "gpio94", "gpio95";
+					drive-strength = <2>;   /* 2 MA */
+					bias-pull-down;	        /* PULL DOWN */
+				};
+			};
+		};
+
+		pmx_quat_mi2s_din {
+			label = "quat_mi2s_din";
+			quat_mi2s_din_active: quat_mi2s_din_active {
+				mux {
+					pins = "gpio12", "gpio13";
+					function = "sec_mi2s";
+				};
+				configs {
+					pins = "gpio12", "gpio13";
+					drive-strength = <8>;   /* 8 MA */
+					bias-disable;	        /* No PULL */
+					output-high;
+				};
+			};
+			quat_mi2s_din_sleep: quat_mi2s_din_sleep {
+				mux {
+					pins = "gpio12", "gpio13";
+					function = "sec_mi2s";
+				};
+				configs {
+					pins = "gpio12", "gpio13";
+					drive-strength = <2>;   /* 2 MA */
+					bias-pull-down;	        /* PULL DOWN */
+				};
+			};
+		};
 
 		cdc_mclk2_pin {
 			cdc_mclk2_sleep: cdc_mclk2_sleep {
@@ -1455,7 +1507,59 @@
 				};
 			};
 		};
+		cdc-dmic-lines {
+			cdc_dmic0_clk_act: dmic0_clk_on {
+				mux {
+					pins = "gpio89";
+					function = "dmic0_clk";
+				};
 
+				config {
+					pins = "gpio89";
+					drive-strength = <8>;
+					bias-pull-none;
+				};
+			};
+
+			cdc_dmic0_clk_sus: dmic0_clk_off {
+				mux {
+					pins = "gpio89";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio89";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			cdc_dmic0_data_act: dmic0_data_on {
+				mux {
+					pins = "gpio90";
+					function = "dmic0_data";
+				};
+
+				config {
+					pins = "gpio90";
+					drive-strength = <8>;
+					bias-pull-none;
+				};
+			};
+
+			cdc_dmic0_data_sus: dmic0_data_off {
+				mux {
+					pins = "gpio90";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio90";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+		};
 		/*sensors */
 		cam_sensor_mclk0_default: cam_sensor_mclk0_default {
 			/* MCLK0 */
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index 3a108b8..ed6ca20 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -97,6 +97,10 @@
 	compatible = "qcom,qcs605-llcc";
 };
 
+&mdss_mdp {
+	/delete-property/ qcom,sde-inline-rotator;
+};
+
 &ipa_hw {
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm429-qrd-spyro-evt.dts b/arch/arm64/boot/dts/qcom/sdm429-qrd-spyro-evt.dts
index 951720e..c9d952d 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-qrd-spyro-evt.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-qrd-spyro-evt.dts
@@ -20,5 +20,5 @@
 	model = "Qualcomm Technologies, Inc. SDM429 QRD Spyro";
 	compatible = "qcom,sdm429-qrd", "qcom,sdm429", "qcom,qrd";
 	qcom,board-id = <0xb 6>;
-	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+	qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm429-qrd-spyro-evt.dtsi b/arch/arm64/boot/dts/qcom/sdm429-qrd-spyro-evt.dtsi
index 6de406d..4f7ccde 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-qrd-spyro-evt.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429-qrd-spyro-evt.dtsi
@@ -13,11 +13,115 @@
 
 #include "sdm439-qrd.dtsi"
 #include "sdm429w-pm660.dtsi"
+#include "sdm429w-camera-sensor-spyro.dtsi"
+
+&gpio_key_active {
+	mux {
+		pins = "gpio91", "gpio127", "gpio128", "gpio35", "gpio126";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio91", "gpio127", "gpio128", "gpio35", "gpio126";
+		drive-strength = <2>;
+		bias-pull-up;
+	};
+};
+
+&gpio_key_suspend {
+	mux {
+		pins = "gpio91", "gpio127", "gpio128", "gpio35", "gpio126";
+		function = "gpio";
+	};
+
+	config {
+		pins = "gpio91", "gpio127", "gpio128", "gpio35", "gpio126";
+		drive-strength = <2>;
+		bias-pull-up;
+	};
+};
+
+&cdc_pdm_lines_act {
+	mux {
+		pins = "gpio68", "gpio73", "gpio74";
+		function = "cdc_pdm0";
+	};
+
+	config {
+		pins = "gpio68", "gpio73", "gpio74";
+		drive-strength = <16>;
+	};
+};
+
+&cdc_pdm_lines_sus {
+	mux {
+		pins = "gpio68", "gpio73", "gpio74";
+		function = "cdc_pdm0";
+	};
+
+	config {
+		pins = "gpio68", "gpio73", "gpio74";
+		drive-strength = <2>;
+		bias-disable;
+	};
+};
 
 &mdss_dsi0 {
 	qcom,dsi-pref-prim-pan = <&dsi_hx8399c_hd_vid>;
 };
 
+&i2c_5 {
+	status = "disabled";
+};
+
+&vol_up {
+	gpios = <&tlmm 35 0x1>;
+};
+
+&gpio_keys {
+	function_1: function_1 {
+		label = "function_1";
+		gpios = <&tlmm 127 0x1>;
+		linux,input-type = <1>;
+		linux,code = <116>;
+		debounce-interval = <15>;
+		linux,can-disable;
+		gpio-key,wakeup;
+	};
+
+	function_2: function_2 {
+		label = "function_2";
+		gpios = <&tlmm 126 0x1>;
+		linux,input-type = <1>;
+		linux,code = <117>;
+		debounce-interval = <15>;
+		linux,can-disable;
+		gpio-key,wakeup;
+	};
+};
+
+&soc {
+	/delete-node/ qcom,spm@b1d2000;
+	qcom,spm@b1d2000 {
+		compatible = "qcom,spm-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xb1d2000 0x1000>;
+		qcom,name = "system-cci";
+		qcom,saw2-ver-reg = <0xfd0>;
+		qcom,saw2-cfg = <0x14>;
+	qcom,saw2-spm-dly = <0x3C102800>;
+		qcom,saw2-spm-ctl = <0xe>;
+		qcom,cpu-vctl-list = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,vctl-timeout-us = <500>;
+		qcom,vctl-port = <0x0>;
+		qcom,vctl-port-ub = <0x1>;
+		qcom,pfm-port = <0x2>;
+	};
+
+};
+
+
 &dsi_hx8399c_hd_vid {
 		qcom,mdss-dsi-on-command = [
 			39 01 00 00 00 00 04
@@ -129,9 +233,87 @@
 };
 
 &adsp_fw_mem {
-	reg = <0x0 0x8b800000 0x0 0x1100000>;
+	reg = <0x0 0x8b800000 0x0 0x1400000>;
 };
 
 &wcnss_fw_mem {
-	reg = <0x0 0x8c900000 0x0 0x700000>;
+	reg = <0x0 0x8cc00000 0x0 0x700000>;
+};
+
+
+&int_codec {
+	compatible = "qcom,msm8952-dig-asoc-snd";
+	status = "okay";
+	qcom,model = "sdm429-qrd-snd-card";
+	qcom,msm-ext-pa = "quaternary";
+	/delete-property/ qcom,split-a2dp;
+	asoc-wsa-codec-names;
+	asoc-wsa-codec-prefixes;
+	qcom,audio-routing =
+		"CDC_CONN", "MCLK",
+		"QUAT_MI2S_RX", "DIGITAL_REGULATOR",
+		"TX_I2S_CLK", "DIGITAL_REGULATOR",
+		"DMIC1", "Digital Mic1",
+		"DMIC2", "Digital Mic2";
+	qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
+	qcom,quat-mi2s-gpios = <&cdc_quat_mi2s_gpios>;
+	qcom,msm-gpios =
+		"quat_i2s",
+		"dmic";
+	qcom,pinctrl-names =
+		"all_off",
+		"quat_i2s_act",
+		"dmic_act",
+		"quat_i2s_dmic_act";
+	pinctrl-names =
+		"all_off",
+		"quat_i2s_act",
+		"dmic_act",
+		"quat_i2s_dmic_act";
+	pinctrl-0 = <&quat_mi2s_sleep &quat_mi2s_din_sleep
+		&cdc_dmic0_clk_sus &cdc_dmic0_data_sus>;
+	pinctrl-1 = <&quat_mi2s_active &quat_mi2s_din_active
+		&cdc_dmic0_clk_sus &cdc_dmic0_data_sus>;
+	pinctrl-2 = <&quat_mi2s_sleep &quat_mi2s_din_sleep
+		&cdc_dmic0_clk_act &cdc_dmic0_data_act>;
+	pinctrl-3 = <&quat_mi2s_active &quat_mi2s_din_active
+		&cdc_dmic0_clk_act &cdc_dmic0_data_act>;
+	/delete-property/ qcom,cdc-us-euro-gpios;
+
+	asoc-codec = <&stub_codec>, <&msm_dig_codec>;
+	asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec";
+};
+
+&soc {
+	msm_dig_codec: msm_dig_codec {
+		compatible = "qcom,msm-digital-codec";
+		reg = <0xc0f0000 0x0>;
+		qcom,no-analog-codec;
+		cdc-vdd-digital-supply = <&pm660_l9>;
+		qcom,cdc-vdd-digital-voltage = <1800000 1800000>;
+		qcom,cdc-vdd-digital-current = <10000>;
+		qcom,cdc-on-demand-supplies = "cdc-vdd-digital";
+	};
+
+	cdc_dmic_gpios: cdc_dmic_pinctrl {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_dmic0_clk_act &cdc_dmic0_data_act>;
+		pinctrl-1 = <&cdc_dmic0_clk_sus &cdc_dmic0_data_sus>;
+	};
+
+	cdc_quat_mi2s_gpios: msm_cdc_pinctrl_quat {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&quat_mi2s_active &quat_mi2s_din_active>;
+		pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_din_sleep>;
+	};
+};
+
+&wsa881x_i2c_f {
+	status = "disabled";
+};
+
+&wsa881x_i2c_45 {
+	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm429w-camera-sensor-spyro.dtsi b/arch/arm64/boot/dts/qcom/sdm429w-camera-sensor-spyro.dtsi
new file mode 100644
index 0000000..f0dec67
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm429w-camera-sensor-spyro.dtsi
@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 2015-2016, 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/msm-clocks-8952.h>
+
+
+&cci {
+	qcom,camera@0 {
+		/delete-property/ qcom,eeprom-src;
+		/delete-property/ qcom,actuator-src;
+	};
+	qcom,camera@2 {
+		/delete-property/ qcom,eeprom-src;
+		/delete-property/ qcom,actuator-src;
+	};
+};
+
+&cci {
+	/delete-node/ qcom,actuator@0;
+	/delete-node/ qcom,actuator@1;
+	/delete-node/ qcom,eeprom@0;
+	/delete-node/ qcom,eeprom@1;
+	/delete-node/ qcom,camera@0;
+	/delete-node/ qcom,camera@1;
+	/delete-node/ qcom,camera@2;
+};
+
+&cci {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	actuator_spyro0: qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		qcom,cci-master = <0>;
+		cam_vaf-supply = <&pm660_l19>;
+		qcom,cam-vreg-name = "cam_vaf";
+		qcom,cam-vreg-min-voltage = <2850000>;
+		qcom,cam-vreg-max-voltage = <2850000>;
+		qcom,cam-vreg-op-mode = <80000>;
+	};
+
+	actuator_spyro1: qcom,actuator@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,actuator";
+		qcom,cci-master = <0>;
+		cam_vaf-supply = <&pm660_l17>;
+		qcom,cam-vreg-name = "cam_vaf";
+		qcom,cam-vreg-min-voltage = <2850000>;
+		qcom,cam-vreg-max-voltage = <2850000>;
+		qcom,cam-vreg-op-mode = <80000>;
+	};
+
+	eeprom_spyro0: qcom,eeprom@0 {
+		cell-index = <0>;
+		compatible = "qcom,eeprom";
+		qcom,cci-master = <0>;
+		reg = <0x0>;
+		cam_vana-supply = <&pm660_l6>;
+		cam_vio-supply = <&pm660_l14>;
+		cam_vaf-supply = <&pm660_l19>;
+		cam_vdig-supply = <&pm660_l2>;
+		qcom,cam-vreg-name = "cam_vana", "cam_vio",
+					"cam_vdig", "cam_vaf";
+		qcom,cam-vreg-min-voltage = <2800000 0 1200000 2850000>;
+		qcom,cam-vreg-max-voltage = <2800000 0 1200000 2850000>;
+		qcom,cam-vreg-op-mode = <80000 0 200000 100000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+			&cam_sensor_rear_reset
+			&cam_sensor_rear_vana>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep
+			&cam_sensor_rear_reset_sleep
+			&cam_sensor_rear_vana_sleep>;
+		gpios = <&tlmm 26 0>,
+			<&tlmm 36 0>,
+			<&tlmm 35 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-vana = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+			"CAM_RESET0",
+			"CAM_VANA";
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+			<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <19200000 0>;
+	};
+
+	eeprom_spyro1: qcom,eeprom@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		qcom,eeprom-name = "sunny_8865";
+		compatible = "qcom,eeprom";
+		qcom,slave-addr = <0x6c>;
+		qcom,cci-master = <0>;
+		qcom,num-blocks = <8>;
+
+		qcom,page0 = <1 0x0100 2 0x01 1 1>;
+		qcom,poll0 = <0 0x0 2 0x0 1 0>;
+		qcom,mem0 = <0 0x0 2 0x0 1 0>;
+
+		qcom,page1 = <1 0x5002 2 0x00 1 0>;
+		qcom,poll1 = <0 0x0 2 0x0 1 0>;
+		qcom,mem1 = <0 0x0 2 0x0 1 0>;
+
+		qcom,page2 = <1 0x3d84 2 0xc0 1 0>;
+		qcom,poll2 = <0 0x0 2 0x0 1 0>;
+		qcom,mem2 = <0 0x0 2 0x0 1 0>;
+
+		qcom,page3 = <1 0x3d88 2 0x70 1 0>;
+		qcom,poll3 = <0 0x0 2 0x0 1 0>;
+		qcom,mem3 = <0 0x0 2 0x0 1 0>;
+
+		qcom,page4 = <1 0x3d89 2 0x10 1 0>;
+		qcom,poll4 = <0 0x0 2 0x0 1 0>;
+		qcom,mem4 = <0 0x0 2 0x0 1 0>;
+
+		qcom,page5 = <1 0x3d8a 2 0x70 1 0>;
+		qcom,poll5 = <0 0x0 2 0x0 1 0>;
+		qcom,mem5 = <0 0x0 2 0x0 1 0>;
+
+		qcom,page6 = <1 0x3d8b 2 0xf4 1 0>;
+		qcom,poll6 = <0 0x0 2 0x0 1 0>;
+		qcom,mem6 = <0 0x0 2 0x0 1 0>;
+
+		qcom,page7 = <1 0x3d81 2 0x01 1 10>;
+		qcom,poll7 = <0 0x0 2 0x0 1 1>;
+		qcom,mem7 = <1536 0x7010 2 0 1 0>;
+
+		cam_vdig-supply = <&pm660_l3>;
+		cam_vana-supply = <&pm660_l7>;
+		cam_vio-supply = <&pm660_l6>;
+		cam_vaf-supply = <&pm660_l17>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio",
+				"cam_vana", "cam_vaf";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-op-mode = <105000 0 80000 100000>;
+		qcom,gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_default
+			&cam_sensor_front1_default>;
+		pinctrl-1 = <&cam_sensor_mclk2_sleep
+			&cam_sensor_front1_sleep>;
+		gpios = <&tlmm 28 0>,
+			<&tlmm 40 0>,
+			<&tlmm 39 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+					  "CAM_RESET2",
+					  "CAM_STANDBY2";
+		qcom,cam-power-seq-type = "sensor_vreg", "sensor_vreg",
+			"sensor_vreg",
+			"sensor_gpio", "sensor_gpio" , "sensor_clk";
+		qcom,cam-power-seq-val = "cam_vdig", "cam_vana", "cam_vio",
+			"sensor_gpio_reset", "sensor_gpio_standby",
+			"sensor_cam_mclk";
+		qcom,cam-power-seq-cfg-val = <1 1 1 1 1 24000000>;
+		qcom,cam-power-seq-delay = <1 1 1 30 30 5>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk2_clk_src>,
+			<&clock_gcc clk_gcc_camss_mclk2_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <19200000 0>;
+	};
+
+	qcom,camera@0 {
+		cell-index = <0>;
+		compatible = "qcom,camera";
+		reg = <0x0>;
+		qcom,csiphy-sd-index = <0>;
+		qcom,csid-sd-index = <0>;
+		qcom,mount-angle = <90>;
+		//qcom,led-flash-src = <&led_flash0>;
+		//qcom,eeprom-src = <&eeprom_spyro0>;
+		qcom,actuator-src = <&actuator_spyro0>;
+		cam_vana-supply = <&pm660_l6>;
+		cam_vio-supply = <&pm660_l14>;
+		cam_vaf-supply = <&pm660_l19>;
+		cam_vdig-supply = <&pm660_l2>;
+		qcom,cam-vreg-name = "cam_vana", "cam_vio",
+				"cam_vdig", "cam_vaf";
+		qcom,cam-vreg-min-voltage = <2800000 0 1200000 2850000>;
+		qcom,cam-vreg-max-voltage = <2800000 0 1200000 2850000>;
+		qcom,cam-vreg-op-mode = <80000 0 200000 100000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_default
+				&cam_sensor_rear_reset
+				&cam_sensor_rear_vana>;
+		pinctrl-1 = <&cam_sensor_mclk0_sleep
+				&cam_sensor_rear_reset_sleep
+				&cam_sensor_rear_vana_sleep>;
+		gpios = <&tlmm 26 0>,
+			<&tlmm 36 0>,
+			<&tlmm 58 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-vana = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+			"CAM_RESET0",
+			"CAM_VANA";
+		qcom,sensor-position = <0>;
+		qcom,sensor-mode = <0>;
+		qcom,cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk0_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk0_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+
+	qcom,camera@1 {
+		cell-index = <1>;
+		compatible = "qcom,camera";
+		reg = <0x1>;
+		qcom,csiphy-sd-index = <1>;
+		qcom,csid-sd-index = <1>;
+		qcom,mount-angle = <270>;
+		cam_vdig-supply = <&pm660_l3>;
+		cam_vio-supply = <&pm660_l14>;
+		cam_vaf-supply = <&pm660_l19>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+							"cam_vaf";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-op-mode = <200000 0 80000 100000>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_default
+				&cam_sensor_front_default>;
+		pinctrl-1 = <&cam_sensor_mclk1_sleep
+				&cam_sensor_front_sleep>;
+		gpios = <&tlmm 27 0>,
+			<&tlmm 33 0>,
+			<&tlmm 66 0>,
+			<&tlmm 38 0>;
+		qcom,gpio-vana = <1>;
+		qcom,gpio-vdig = <2>;
+		qcom,gpio-reset = <3>;
+		qcom,gpio-req-tbl-num = <0 1 2 3>;
+		qcom,gpio-req-tbl-flags = <1 0 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+			"CAM_AVDD1",
+			"CAM_DVDD1",
+			"CAM_RESET1";
+		qcom,sensor-position = <0x1>;
+		qcom,sensor-mode = <0>;
+		qcom,cci-master = <1>;
+		clocks = <&clock_gcc clk_mclk1_clk_src>,
+				<&clock_gcc clk_gcc_camss_mclk1_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+
+	qcom,camera@2 {
+		cell-index = <2>;
+		compatible = "qcom,camera";
+		reg = <0x02>;
+		qcom,csiphy-sd-index = <1>;
+		qcom,csid-sd-index = <1>;
+		qcom,mount-angle = <270>;
+		qcom,eeprom-src = <&eeprom_spyro1>;
+		qcom,actuator-src = <&actuator_spyro1>;
+		cam_vdig-supply = <&pm660_l3>;
+		cam_vana-supply = <&pm660_l7>;
+		cam_vio-supply = <&pm660_l6>;
+		cam_vaf-supply = <&pm660_l17>;
+		qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+					"cam_vaf";
+		qcom,cam-vreg-min-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-max-voltage = <1200000 0 2800000 2850000>;
+		qcom,cam-vreg-op-mode = <105000 0 80000 100000>;
+		qcom,gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_default
+				&cam_sensor_front1_default>;
+		pinctrl-1 = <&cam_sensor_mclk2_sleep
+				&cam_sensor_front1_sleep>;
+		gpios = <&tlmm 27 0>,
+			<&tlmm 38 0>,
+			<&tlmm 39 0>;
+		qcom,gpio-reset = <1>;
+		qcom,gpio-standby = <2>;
+		qcom,gpio-req-tbl-num = <0 1 2>;
+		qcom,gpio-req-tbl-flags = <1 0 0>;
+		qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+					  "CAM_RESET2",
+					  "CAM_STANDBY2";
+		qcom,sensor-position = <1>;
+		qcom,sensor-mode = <0>;
+		qcom,cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_gcc clk_mclk2_clk_src>,
+			<&clock_gcc clk_gcc_camss_mclk2_clk>;
+		clock-names = "cam_src_clk", "cam_clk";
+		qcom,clock-rates = <24000000 0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi
index 7238e1b..2927e91 100644
--- a/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi
@@ -163,26 +163,26 @@
 		/delete-property/ vdd-apc-supply;
 	};
 
-	qcom,cpu-clock-8939@b111050 {
-		/delete-property/ vdd-c0-supply;
-		/delete-property/ vdd-c1-supply;
-		/delete-property/ vdd-cci-supply;
-	};
-
 	qcom,gcc@1800000 {
 		/delete-property/ vdd_dig-supply;
 		/delete-property/ vdd_sr2_dig-supply;
 		/delete-property/ vdd_sr2_pll-supply;
 		/delete-property/ vdd_hf_dig-supply;
 		/delete-property/ vdd_hf_pll-supply;
+		vdd_dig-supply = <&VDD_CX_LEVEL>;
+		vdd_sr2_dig-supply = <&VDD_CX_LEVEL_AO>;
+		vdd_sr2_pll-supply = <&L12A_AO>;
+		vdd_hf_dig-supply = <&VDD_CX_LEVEL_AO>;
+		vdd_hf_pll-supply = <&L12A_AO>;
 	};
 
 	qcom,lpass@c200000 {
-		/delete-property/ vdd_cx-supply;
+		vdd_cx-supply = <&pm660_s2_level>;
 	};
 
 	qcom,pronto@a21b000 {
 		/delete-property/ vdd_pronto_pll-supply;
+		vdd_pronto_pll-supply = <&pm660_l12>;
 	};
 
 	qcom,wcnss-wlan@0a000000 {
@@ -194,6 +194,13 @@
 		/delete-property/ qcom,iris-vddpa-supply;
 		/delete-property/ qcom,iris-vdddig-supply;
 		/delete-property/ qcom,wcnss-adc_tm;
+		qcom,pronto-vddmx-supply = <&pm660_s2_level_ao>;
+		qcom,pronto-vddcx-supply = <&pm660_s1_level>;
+		qcom,pronto-vddpx-supply = <&pm660_l13>;
+		qcom,iris-vddxo-supply   = <&pm660_l12>;
+		qcom,iris-vddrfa-supply  = <&pm660_l5>;
+		qcom,iris-vdddig-supply  = <&pm660_l13>;
+		qcom,wcnss-adc_tm = <&pm660_adc_tm>;
 	};
 
 	qcom,csid@1b30000 {
@@ -241,12 +248,11 @@
 };
 
 &pil_mss {
-	/delete-property/ vdd_mss-supply;
-	/delete-property/ vdd_cx-supply;
-	/delete-property/ vdd_cx-voltage;
-	/delete-property/ vdd_mx-supply;
-	/delete-property/ vdd_mx-uV;
-	/delete-property/ vdd_pll-supply;
+	vdd_mss-supply = <&S6A>;
+	vdd_cx-supply = <&pm660_s1_level_ao>;
+	vdd_mx-supply = <&pm660_s2_level_ao>;
+	vdd_pll-supply = <&L12A>;
+	qcom,vdd_pll = <1800000>;
 };
 
 &cci {
@@ -692,4 +698,33 @@
 
 &usb_otg {
 	extcon = <&pm660_charger>;
+	hsusb_vdd_dig-supply = <&L6A>;
+	HSUSB_1p8-supply = <&L12A>;
+	HSUSB_3p3-supply = <&L16A>;
+	qcom,vdd-voltage-level = <0 928000 928000>;
+	vbus_otg-supply = <&smb2_vbus>;
+	status = "okay";
+};
+
+&sdhc_1 {
+	/* device core power supply */
+	vdd-supply = <&L19A>;
+	qcom,vdd-voltage-level = <2900000 3200000>;
+	qcom,vdd-current-level = <200 570000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&L13A>;
+	qcom,vdd-io-current-level = <0 60000>;
+	qcom,dll-hsr-list = <0x00076400 0x0 0x0 0x0 0x00040868>;
+};
+
+&sdhc_2 {
+	/* device core power supply */
+	vdd-supply = <&vreg_sd_vdd>;
+	qcom,vdd-current-level = <0 800000>;
+
+	/* device communication power supply */
+	vdd-io-supply = <&vreg_sd_pad>;
+	qcom,vdd-io-current-level = <0 10000>;
+	qcom,dll-hsr-list = <0x00076400 0x0 0x0 0x0 0x00040868>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi
index 905e128..75cc878 100644
--- a/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi
@@ -13,6 +13,7 @@
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
+#include <dt-bindings/gpio/gpio.h>
 
 &rpm_bus {
 	/* PM660 S1 - VDD_CX supply */
@@ -116,6 +117,21 @@
 		};
 	};
 
+	rpm-regulator-smpa6 {
+		status = "okay";
+		S6A: pm660_s6_level: regulator-s6-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm660_s6_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_BINNING>;
+			qcom,use-voltage-level;
+			status = "okay";
+		};
+	};
+
 	rpm-regulator-ldoa1 {
 		status = "okay";
 		L1A: pm660_l1: regulator-l1 {
@@ -309,12 +325,12 @@
 /* SPM controlled regulators */
 &spmi_bus {
 	qcom,pm660@1 {
-		S3A: pm660_s3: spm-regulator@1a00 {
+		apc_vreg_regulator: S3A: pm660_s3: spm-regulator@1a00 {
 			compatible = "qcom,spm-regulator";
 			reg = <0x1a00 0x100>;
 			regulator-name = "pm660_s3";
-			regulator-min-microvolt = <725000>;
-			regulator-max-microvolt = <736000>;
+			regulator-min-microvolt = <490000>;
+			regulator-max-microvolt = <980000>;
 		};
 	};
 };
@@ -323,4 +339,23 @@
 	regulator@b018000 {
 		vdd-apc-supply = <&pm660_s3>;
 	};
+
+	vreg_sd_pad: vreg_sd_pad {
+		compatible = "regulator-gpio";
+		regulator-name = "vreg_sd_pad";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <2950000>;
+		enable-gpio = <&tlmm 63 GPIO_ACTIVE_HIGH>;
+		gpios = <&tlmm 91 GPIO_ACTIVE_HIGH>;
+		states = <1800000 0>, <2950000 1>;
+		startup-delay-us = <200000>;
+		enable-active-high;
+	};
+
+	vreg_sd_vdd: sd-vdd-fixed-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "sd_vdd";
+		regulator-min-microvolt = <2950000>;
+		regulator-max-microvolt = <2950000>;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
index c382f06..3d2ff51 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-qrd.dtsi
@@ -130,13 +130,13 @@
 };
 
 &soc {
-	gpio_keys {
+	gpio_keys: gpio_keys {
 		compatible = "gpio-keys";
 		label = "gpio-keys";
 		pinctrl-names = "default";
 		pinctrl-0 = <&gpio_key_active>;
 
-		vol_up {
+		vol_up: vol_up {
 			label = "volume_up";
 			gpios = <&tlmm 91 0x1>;
 			linux,input-type = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm439.dtsi b/arch/arm64/boot/dts/qcom/sdm439.dtsi
index ddf9ee6..e4e74cb 100644
--- a/arch/arm64/boot/dts/qcom/sdm439.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -682,7 +682,15 @@
 };
 
 &sdhc_1 {
-	qcom,ddr-config = <0x00040868>;
+	/* DLL HSR settings. Refer go/hsr - <Target> DLL settings */
+	qcom,dll-hsr-list = <0x00076400 0x0 0x0 0x0 0x00040868>;
+
+};
+
+&sdhc_2 {
+	/* DLL HSR settings. Refer go/hsr - <Target> DLL settings */
+	qcom,dll-hsr-list = <0x00076400 0x0 0x0 0x0 0x00040868>;
+
 };
 
 &mdss_mdp {
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
index 4f6e7f5..5f9b046 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM450 + PMI632 SOC";
 	compatible = "qcom,sdm450";
-	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+	qcom,board-id = <8 3>;
+	qcom,pmic-id = <0x16 0x25 0x0 0x0>;
 	qcom,pmic-name = "PMI632";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
index 78047bd..fd0aa8a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -194,6 +194,11 @@
 };
 
 &cam_cci {
+	qcom,cam-res-mgr {
+		compatible = "qcom,cam-res-mgr";
+		status = "ok";
+	};
+
 	actuator_rear: qcom,actuator@0 {
 		cell-index = <0>;
 		reg = <0x0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 5dbf55a..112b30a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -463,6 +463,7 @@
 					type = "ext4";
 					mnt_flags = "ro,barrier=1,discard";
 					fsmgr_flags = "wait,slotselect,avb";
+					status = "ok";
 				};
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.1-rb3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.1-rb3.dtsi
index 87624b1..eebc565 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.1-rb3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.1-rb3.dtsi
@@ -101,6 +101,7 @@
 	};
 };
 
-&pil_modem {
+&ssc_sensors {
 	status = "disabled";
 };
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 0b6e14a7..2d36265 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -508,6 +508,7 @@
 					type = "ext4";
 					mnt_flags = "ro,barrier=1,discard";
 					fsmgr_flags = "wait,slotselect,avb";
+					status = "ok";
 				};
 			};
 		};
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index 5aead19..3c1e253 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -6,6 +6,7 @@
 CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUP_FREEZER=y
@@ -57,8 +58,6 @@
 CONFIG_ARM64_SW_TTBR0_PAN=y
 CONFIG_ARM64_LSE_ATOMICS=y
 CONFIG_RANDOMIZE_BASE=y
-CONFIG_CMDLINE="console=ttyAMA0"
-CONFIG_CMDLINE_EXTEND=y
 # CONFIG_EFI is not set
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -81,6 +80,7 @@
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
 CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -182,6 +182,7 @@
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_U32=y
 CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
@@ -195,7 +196,6 @@
 # CONFIG_MAC80211_RC_MINSTREL is not set
 CONFIG_RFKILL=y
 # CONFIG_UEVENT_HELPER is not set
-CONFIG_DEVTMPFS=y
 # CONFIG_ALLOW_DEV_COREDUMP is not set
 CONFIG_DEBUG_DEVRES=y
 CONFIG_OF_UNITTEST=y
@@ -216,6 +216,7 @@
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
 CONFIG_DM_VERITY_AVB=y
+CONFIG_DM_BOW=y
 CONFIG_NETDEVICES=y
 CONFIG_NETCONSOLE=y
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -282,6 +283,7 @@
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_VIRTIO_CONSOLE=y
@@ -383,6 +385,7 @@
 # CONFIG_MMC_BLOCK is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_SYSTOHC is not set
+CONFIG_RTC_DRV_PL030=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_VIRTIO_PCI=y
 # CONFIG_VIRTIO_PCI_LEGACY is not set
@@ -412,6 +415,7 @@
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
diff --git a/arch/arm64/configs/ranchu64_defconfig b/arch/arm64/configs/ranchu64_defconfig
index 704d9b5..4195a35 100644
--- a/arch/arm64/configs/ranchu64_defconfig
+++ b/arch/arm64/configs/ranchu64_defconfig
@@ -188,7 +188,6 @@
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TABLET=y
 CONFIG_INPUT_MISC=y
-CONFIG_INPUT_KEYCHORD=y
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_GPIO=y
 # CONFIG_SERIO_SERPORT is not set
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index 8d8a6b77..386de91 100755
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -598,6 +598,7 @@
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS_POSIX_ACL=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 3ff219d..1558f6a 100755
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -618,6 +618,7 @@
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS_POSIX_ACL=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 73c30e0..9e0c7a8 100755
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -597,6 +597,7 @@
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS_POSIX_ACL=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 36a0b33..99b464a 100755
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -616,6 +616,7 @@
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
 CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS_POSIX_ACL=y
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index 3363560..7bc459d 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -74,12 +74,13 @@
 	beq	10f
 	ext	v0.16b, v0.16b, v0.16b, #1	/* rotate out the mac bytes */
 	b	7b
-8:	mov	w7, w8
+8:	cbz	w8, 91f
+	mov	w7, w8
 	add	w8, w8, #16
 9:	ext	v1.16b, v1.16b, v1.16b, #1
 	adds	w7, w7, #1
 	bne	9b
-	eor	v0.16b, v0.16b, v1.16b
+91:	eor	v0.16b, v0.16b, v1.16b
 	st1	{v0.16b}, [x0]
 10:	str	w8, [x3]
 	ret
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
old mode 100644
new mode 100755
index a891bb6..e69df28
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -30,8 +30,8 @@
 "	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
-"2:	stlxr	%w3, %w0, %2\n"						\
-"	cbnz	%w3, 1b\n"						\
+"2:	stlxr	%w0, %w3, %2\n"						\
+"	cbnz	%w0, 1b\n"						\
 "	dmb	ish\n"							\
 "3:\n"									\
 "	.pushsection .fixup,\"ax\"\n"					\
@@ -56,23 +56,23 @@
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("mov	%w0, %w4",
+		__futex_atomic_op("mov	%w3, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add	%w0, %w1, %w4",
+		__futex_atomic_op("add	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("orr	%w0, %w1, %w4",
+		__futex_atomic_op("orr	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and	%w0, %w1, %w4",
+		__futex_atomic_op("and	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("eor	%w0, %w1, %w4",
+		__futex_atomic_op("eor	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	default:
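
Note: the operand swap matters because of how the asm constraints bind: (ret, oldval, uaddr, tmp, oparg) map to %w0..%w4. Previously each op computed its result into %w0 (ret) and stlxr wrote its status into %w3, so a successful sequence could leave ret holding the operation result instead of 0. The binding after the fix, as inferred from the macro arguments (sketch, not new code):

	/*
	 * %w0 = ret    -- stlxr status: 0 on success, retried via cbnz
	 * %w1 = oldval -- value ldxr loaded from the futex word
	 * %w2 = *uaddr -- the futex word itself
	 * %w3 = tmp    -- scratch holding the computed value to store
	 * %w4 = oparg  -- operand of the futex operation
	 */
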
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index d8039a1..e9ba70a 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -49,7 +49,15 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
  */
 #ifdef CONFIG_COMPAT
+#ifdef CONFIG_ARM64_64K_PAGES
+/*
+ * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
+ * by the compat vectors page.
+ */
 #define TASK_SIZE_32		UL(0x100000000)
+#else
+#define TASK_SIZE_32		(UL(0x100000000) - PAGE_SIZE)
+#endif /* CONFIG_ARM64_64K_PAGES */
 #define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
 				TASK_SIZE_32 : TASK_SIZE_64)
 #define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 96c11e7..129d610 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -40,7 +40,7 @@
 			   int sig, int code, const char *name);
 
 struct mm_struct;
-extern void show_pte(struct mm_struct *mm, unsigned long addr);
+extern void show_pte(unsigned long addr);
 extern void __show_regs(struct pt_regs *);
 
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 73ae90e..9f1adca 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -132,6 +132,7 @@
  */
 static int clear_os_lock(unsigned int cpu)
 {
+	write_sysreg(0, osdlr_el1);
 	write_sysreg(0, oslar_el1);
 	isb();
 	return 0;
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0c721bd..12e073f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -535,8 +535,7 @@
 	/* GICv3 system register access */
 	mrs	x0, id_aa64pfr0_el1
 	ubfx	x0, x0, #24, #4
-	cmp	x0, #1
-	b.ne	3f
+	cbz	x0, 3f
 
 	mrs_s	x0, ICC_SRE_EL2
 	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index e017a94..72a660a 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -231,24 +231,33 @@
 
 static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
-	return 0;
+	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_brk_fn)
 
 static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
 	compiled_break = 1;
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
 
-	return 0;
+	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
 
 static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
-	return 0;
+	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
 
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d2b1b62..17f6471 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -450,6 +450,9 @@
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	int retval;
 
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
 	/* return error if this is not our step */
 	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
 
@@ -466,6 +469,9 @@
 int __kprobes
 kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
 {
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
 	kprobe_handler(regs);
 	return DBG_HOOK_HANDLED;
 }
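
Note: all of these debug hooks gain the same guard. They are registered for kernel-mode exceptions only, so a breakpoint or single-step trap taken from user space must be declined with DBG_HOOK_ERROR and left to the generic handler rather than being swallowed here. A minimal sketch of the convention, with a hypothetical hook and helper:

	static int my_brk_fn(struct pt_regs *regs, unsigned int esr)
	{
		if (user_mode(regs))
			return DBG_HOOK_ERROR;	/* not ours: user-space trap */

		handle_kernel_break(regs);	/* hypothetical helper */
		return DBG_HOOK_HANDLED;
	}
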
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
old mode 100644
new mode 100755
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c1d02d1..f0166ee 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -81,18 +81,33 @@
 #endif
 
 /*
- * Dump out the page tables associated with 'addr' in mm 'mm'.
+ * Dump out the page tables associated with 'addr' in the currently active mm.
  */
-void show_pte(struct mm_struct *mm, unsigned long addr)
+void show_pte(unsigned long addr)
 {
+	struct mm_struct *mm;
 	pgd_t *pgd;
 
-	if (!mm)
+	if (addr < TASK_SIZE) {
+		/* TTBR0 */
+		mm = current->active_mm;
+		if (mm == &init_mm) {
+			pr_alert("[%016lx] user address but active_mm is swapper\n",
+				 addr);
+			return;
+		}
+	} else if (addr >= VA_START) {
+		/* TTBR1 */
 		mm = &init_mm;
+	} else {
+		pr_alert("[%016lx] address between user and kernel address ranges\n",
+			 addr);
+		return;
+	}
 
 	pr_alert("pgd = %p\n", mm->pgd);
 	pgd = pgd_offset(mm, addr);
-	pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));
+	pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd));
 
 	do {
 		pud_t *pud;
@@ -178,8 +193,8 @@
 /*
  * The kernel tried to access some page that wasn't present.
  */
-static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
-			      unsigned int esr, struct pt_regs *regs)
+static void __do_kernel_fault(unsigned long addr, unsigned int esr,
+			      struct pt_regs *regs)
 {
 	/*
 	 * Are we prepared to handle this kernel fault?
@@ -196,7 +211,7 @@
 		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
 		 "paging request", addr);
 
-	show_pte(mm, addr);
+	show_pte(addr);
 	die("Oops", regs, esr);
 	bust_spinlocks(0);
 	do_exit(SIGKILL);
@@ -220,7 +235,6 @@
 		pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
 			tsk->comm, task_pid_nr(tsk), inf->name, sig,
 			addr, esr);
-		show_pte(tsk->mm, addr);
 		show_regs(regs);
 	}
 
@@ -236,7 +250,6 @@
 static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->active_mm;
 	const struct fault_info *inf;
 
 	/*
@@ -247,7 +260,7 @@
 		inf = esr_to_fault_info(esr);
 		__do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
 	} else
-		__do_kernel_fault(mm, addr, esr, regs);
+		__do_kernel_fault(addr, esr, regs);
 }
 
 #define VM_FAULT_BADMAP		0x010000
@@ -486,7 +499,7 @@
 	return 0;
 
 no_context:
-	__do_kernel_fault(mm, addr, esr, regs);
+	__do_kernel_fault(addr, esr, regs);
 	return 0;
 }
 
@@ -721,11 +734,12 @@
 	debug_fault_info[nr].name	= name;
 }
 
-asmlinkage int __exception do_debug_exception(unsigned long addr,
+asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
 					      unsigned int esr,
 					      struct pt_regs *regs)
 {
 	const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
+	unsigned long pc = instruction_pointer(regs);
 	struct siginfo info;
 	int rv;
 
@@ -736,19 +750,19 @@
 	if (interrupts_enabled(regs))
 		trace_hardirqs_off();
 
-	if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+	if (user_mode(regs) && pc > TASK_SIZE)
 		arm64_apply_bp_hardening();
 
-	if (!inf->fn(addr, esr, regs)) {
+	if (!inf->fn(addr_if_watchpoint, esr, regs)) {
 		rv = 1;
 	} else {
 		pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
-			 inf->name, esr, addr);
+			 inf->name, esr, pc);
 
 		info.si_signo = inf->sig;
 		info.si_errno = 0;
 		info.si_code  = inf->code;
-		info.si_addr  = (void __user *)addr;
+		info.si_addr  = (void __user *)pc;
 		arm64_notify_die("", regs, &info, 0);
 		rv = 0;
 	}
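
Note: show_pte() now derives the mm to walk from the address itself, removing the callers' guesswork and the NULL-mm special case. The dispatch, in brief:

	if (addr < TASK_SIZE)			/* TTBR0: user VA */
		mm = current->active_mm;
	else if (addr >= VA_START)		/* TTBR1: kernel VA */
		mm = &init_mm;
	else
		return;		/* hole between the ranges: mapped by neither TTBR */
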
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 201d918..ac31a2e 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -161,7 +161,7 @@
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
 	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-			 pfn_to_nid(virt_to_pfn(_text)));
+			 pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
 	/*
 	 * vmemmap_populate() has populated the shadow region that covers the
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index efb32b2..c5bad6d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -236,7 +236,8 @@
 	dc	cvac, cur_\()\type\()p		// Ensure any existing dirty
 	dmb	sy				// lines are written back before
 	ldr	\type, [cur_\()\type\()p]	// loading the entry
-	tbz	\type, #0, next_\()\type	// Skip invalid entries
+	tbz	\type, #0, skip_\()\type	// Skip invalid and
+	tbnz	\type, #11, skip_\()\type	// non-global entries
 	.endm
 
 	.macro __idmap_kpti_put_pgtable_ent_ng, type
@@ -296,8 +297,9 @@
 	add	end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
 do_pgd:	__idmap_kpti_get_pgtable_ent	pgd
 	tbnz	pgd, #1, walk_puds
-	__idmap_kpti_put_pgtable_ent_ng	pgd
 next_pgd:
+	__idmap_kpti_put_pgtable_ent_ng	pgd
+skip_pgd:
 	add	cur_pgdp, cur_pgdp, #8
 	cmp	cur_pgdp, end_pgdp
 	b.ne	do_pgd
@@ -325,8 +327,9 @@
 	add	end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
 do_pud:	__idmap_kpti_get_pgtable_ent	pud
 	tbnz	pud, #1, walk_pmds
-	__idmap_kpti_put_pgtable_ent_ng	pud
 next_pud:
+	__idmap_kpti_put_pgtable_ent_ng	pud
+skip_pud:
 	add	cur_pudp, cur_pudp, 8
 	cmp	cur_pudp, end_pudp
 	b.ne	do_pud
@@ -345,8 +348,9 @@
 	add	end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
 do_pmd:	__idmap_kpti_get_pgtable_ent	pmd
 	tbnz	pmd, #1, walk_ptes
-	__idmap_kpti_put_pgtable_ent_ng	pmd
 next_pmd:
+	__idmap_kpti_put_pgtable_ent_ng	pmd
+skip_pmd:
 	add	cur_pmdp, cur_pmdp, #8
 	cmp	cur_pmdp, end_pmdp
 	b.ne	do_pmd
@@ -364,7 +368,7 @@
 	add	end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
 do_pte:	__idmap_kpti_get_pgtable_ent	pte
 	__idmap_kpti_put_pgtable_ent_ng	pte
-next_pte:
+skip_pte:
 	add	cur_ptep, cur_ptep, #8
 	cmp	cur_ptep, end_ptep
 	b.ne	do_pte
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index e1c02ca..073bba6 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -23,7 +23,7 @@
 LDFLAGS += $(ldflags-y)
 
 ifeq ($(CROSS_COMPILE),)
-CROSS_COMPILE := h8300-unknown-linux-
+CROSS_COMPILE := $(call cc-cross-prefix, h8300-unknown-linux- h8300-linux-)
 endif
 
 core-y	+= arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index f0dd9fc..a229d28 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -58,7 +58,10 @@
 cpuflags-$(CONFIG_M5206)	:= $(call cc-option,-mcpu=5206,-m5200)
 
 KBUILD_AFLAGS += $(cpuflags-y)
-KBUILD_CFLAGS += $(cpuflags-y) -pipe
+KBUILD_CFLAGS += $(cpuflags-y)
+
+KBUILD_CFLAGS += -pipe -ffreestanding
+
 ifdef CONFIG_MMU
 # without -fno-strength-reduce the 53c7xx.c driver fails ;-(
 KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 4e5aa2f..87160b4 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -14,6 +14,7 @@
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/kernel.h>
 #include <linux/param.h>
 #include <linux/string.h>
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
index 4643e3a..2022130 100644
--- a/arch/microblaze/kernel/heartbeat.c
+++ b/arch/microblaze/kernel/heartbeat.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/io.h>
 
 #include <asm/setup.h>
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 26a058d..c7c31e2 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -183,12 +183,6 @@
 	return ath79_sys_type;
 }
 
-int get_c0_perfcount_int(void)
-{
-	return ATH79_MISC_IRQ(5);
-}
-EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
-
 unsigned int get_c0_compare_int(void)
 {
 	return CP0_LEGACY_COMPARE_IRQ;
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index c95d94c..5fc444d 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -65,7 +65,7 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
 CONFIG_FANOTIFY=y
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=y
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index e776725..e4456e4 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -21,15 +21,15 @@
 #endif
 
 #ifdef CONFIG_CPU_MICROMIPS
-#define NOP_INSN "nop32"
+#define B_INSN "b32"
 #else
-#define NOP_INSN "nop"
+#define B_INSN "b"
 #endif
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("1:\t" NOP_INSN "\n\t"
-		"nop\n\t"
+	asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
+		"2:\tnop\n\t"
 		".pushsection __jump_table,  \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
 		".popsection\n\t"
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 7913a5c..b9c7887 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -125,7 +125,7 @@
 	subu	t1, v0,  __NR_O32_Linux
 	move	a1, v0
 	bnez	t1, 1f /* __NR_syscall at offset 0 */
-	lw	a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+	ld	a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
 	.set	pop
 
 1:	jal	syscall_trace_enter
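
Note: the lw/ld distinction above is an endianness fix: PT_R4 names a full 64-bit register slot, and on a big-endian 64-bit kernel a 32-bit lw from the start of that slot reads the upper half (zero) instead of the o32 syscall number. Roughly:

	/* 64-bit slot holding syscall number 4000 (0xfa0), big-endian layout:
	 *   bytes 0-3 = 0x00000000, bytes 4-7 = 0x00000fa0
	 *
	 * lw a1, PT_R4(sp)   -> a1 = 0      (wrong half of the slot)
	 * ld a1, PT_R4(sp)   -> a1 = 0xfa0  (the whole slot)
	 */
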
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index f0a0e6d..2d965d9 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -138,6 +138,13 @@
 	PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
 #endif
 
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+	.appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
+		*(.appended_dtb)
+		KEEP(*(.appended_dtb))
+	}
+#endif
+
 #ifdef CONFIG_RELOCATABLE
 	. = ALIGN(4);
 
@@ -162,11 +169,6 @@
 	__appended_dtb = .;
 	/* leave space for appended DTB */
 	. += 0x100000;
-#elif defined(CONFIG_MIPS_ELF_APPENDED_DTB)
-	.appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
-		*(.appended_dtb)
-		KEEP(*(.appended_dtb))
-	}
 #endif
 	/*
 	 * Align to 64K in attempt to eliminate holes before the
diff --git a/arch/mips/loongson64/lemote-2f/irq.c b/arch/mips/loongson64/lemote-2f/irq.c
index cab5f43..d371f02 100644
--- a/arch/mips/loongson64/lemote-2f/irq.c
+++ b/arch/mips/loongson64/lemote-2f/irq.c
@@ -102,7 +102,7 @@
 static struct irqaction cascade_irqaction = {
 	.handler = no_action,
 	.name = "cascade",
-	.flags = IRQF_NO_THREAD,
+	.flags = IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 };
 
 void __init mach_init_irq(void)
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 60e6f07..eb83d65 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -59,14 +59,14 @@
 #define LDCW		ldcw,co
 #define BL		b,l
 # ifdef CONFIG_64BIT
-#  define LEVEL		2.0w
+#  define PA_ASM_LEVEL	2.0w
 # else
-#  define LEVEL		2.0
+#  define PA_ASM_LEVEL	2.0
 # endif
 #else
 #define LDCW		ldcw
 #define BL		bl
-#define LEVEL		1.1
+#define PA_ASM_LEVEL	1.1
 #endif
 
 #ifdef __ASSEMBLY__
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 2e674e1..656984e 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -323,6 +323,8 @@
 #define parisc_requires_coherency()	(0)
 #endif
 
+extern int running_on_qemu;
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index bbbe360..9b99eb07 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -22,7 +22,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-	.level	LEVEL
+	.level	PA_ASM_LEVEL
 
 	__INITDATA
 ENTRY(boot_args)
@@ -254,7 +254,7 @@
 	ldo		R%PA(fault_vector_11)(%r10),%r10
 
 $is_pa20:
-	.level		LEVEL /* restore 1.1 || 2.0w */
+	.level		PA_ASM_LEVEL /* restore 1.1 || 2.0w */
 #endif /*!CONFIG_64BIT*/
 	load32		PA(fault_vector_20),%r10
 
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index c3a532a..b4e3eda 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -189,6 +189,7 @@
  */
 
 int running_on_qemu __read_mostly;
+EXPORT_SYMBOL(running_on_qemu);
 
 void __cpuidle arch_cpu_idle_dead(void)
 {
@@ -206,12 +207,6 @@
 
 static int __init parisc_idle_init(void)
 {
-	const char *marker;
-
-	/* check QEMU/SeaBIOS marker in PAGE0 */
-	marker = (char *) &PAGE0->pad0;
-	running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
-
 	if (!running_on_qemu)
 		cpu_idle_poll_ctrl(1);
 
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 2e66a88..581b0c6 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -403,6 +403,9 @@
 	int ret, cpunum;
 	struct pdc_coproc_cfg coproc_cfg;
 
+	/* check QEMU/SeaBIOS marker in PAGE0 */
+	running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
+
 	cpunum = smp_processor_id();
 
 	set_firmware_width_unlocked();
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 5f7e57f..0cf379a 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -48,7 +48,7 @@
 	 */
 #define KILL_INSN	break	0,0
 
-	.level          LEVEL
+	.level          PA_ASM_LEVEL
 
 	.text
 
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 47ef8fd..22754e0 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -299,7 +299,7 @@
 	 * The cr16 interval timers are not syncronized across CPUs, so mark
 	 * them unstable and lower rating on SMP systems.
 	 */
-	if (num_online_cpus() > 1) {
+	if (num_online_cpus() > 1 && !running_on_qemu) {
 		clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
 		clocksource_cr16.rating = 0;
 	}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bc11d709f..062b006 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -129,7 +129,7 @@
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_CMOS_UPDATE
-	select GENERIC_CPU_VULNERABILITIES	if PPC_BOOK3S_64
+	select GENERIC_CPU_VULNERABILITIES	if PPC_BARRIER_NOSPEC
 	select GENERIC_TIME_VSYSCALL_OLD
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
@@ -165,6 +165,11 @@
 	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_KERNEL_GZIP
 
+config PPC_BARRIER_NOSPEC
+    bool
+    default y
+    depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
+
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
 
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index e0baba1..f3daa17 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -121,4 +121,10 @@
 extern int __cmpdi2(s64, s64);
 extern int __ucmpdi2(u64, u64);
 
+/* Patch sites */
+extern s32 patch__call_flush_count_cache;
+extern s32 patch__flush_count_cache_return;
+
+extern long flush_count_cache;
+
 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 798ab37..80024c4 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -77,6 +77,27 @@
 
 #define smp_mb__before_spinlock()   smp_mb()
 
+#ifdef CONFIG_PPC_BOOK3S_64
+#define NOSPEC_BARRIER_SLOT   nop
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#define NOSPEC_BARRIER_SLOT   nop; nop
+#endif
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+/*
+ * Prevent execution of subsequent instructions until preceding branches have
+ * been fully resolved and are no longer executing speculatively.
+ */
+#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
+
+// This also acts as a compiler barrier due to the memory clobber.
+#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
+
+#else /* !CONFIG_PPC_BARRIER_NOSPEC */
+#define barrier_nospec_asm
+#define barrier_nospec()
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
 #include <asm-generic/barrier.h>
 
 #endif /* _ASM_POWERPC_BARRIER_H */
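
For orientation, barrier_nospec() implements the standard Spectre v1 defence: it must sit between a bounds check and the first load that depends on the checked, attacker-controlled value, so the load cannot execute under a mispredicted branch. A hedged sketch of the pattern (function and variable names are illustrative, not from this series; kernel context assumed for -EINVAL):

    static long load_checked(const long *table, unsigned long idx,
                             unsigned long nr)
    {
            if (idx >= nr)
                    return -EINVAL;
            barrier_nospec();       /* bounds-check branch must resolve first */
            return table[idx];      /* dependent load is now safe */
    }
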
diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
new file mode 100644
index 0000000..ed7b144
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching-asm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018, Michael Ellerman, IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
+#define _ASM_POWERPC_CODE_PATCHING_ASM_H
+
+/* Define a "site" that can be patched */
+.macro patch_site label name
+	.pushsection ".rodata"
+	.balign 4
+	.global \name
+\name:
+	.4byte	\label - .
+	.popsection
+.endm
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index b4ab1f4..ab934f8 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -28,6 +28,8 @@
 				unsigned long target, int flags);
 int patch_branch(unsigned int *addr, unsigned long target, int flags);
 int patch_instruction(unsigned int *addr, unsigned int instr);
+int patch_instruction_site(s32 *addr, unsigned int instr);
+int patch_branch_site(s32 *site, unsigned long target, int flags);
 
 int instr_is_relative_branch(unsigned int instr);
 int instr_is_relative_link_branch(unsigned int instr);
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 0bf8202..175128e 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -213,6 +213,25 @@
 	FTR_ENTRY_OFFSET 951b-952b;			\
 	.popsection;
 
+#define NOSPEC_BARRIER_FIXUP_SECTION			\
+953:							\
+	.pushsection __barrier_nospec_fixup,"a";	\
+	.align 2;					\
+954:							\
+	FTR_ENTRY_OFFSET 953b-954b;			\
+	.popsection;
+
+#define START_BTB_FLUSH_SECTION			\
+955:							\
+
+#define END_BTB_FLUSH_SECTION			\
+956:							\
+	.pushsection __btb_flush_fixup,"a";	\
+	.align 2;							\
+957:						\
+	FTR_ENTRY_OFFSET 955b-957b;			\
+	FTR_ENTRY_OFFSET 956b-957b;			\
+	.popsection;
 
 #ifndef __ASSEMBLY__
 
@@ -220,6 +239,8 @@
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
 
 #endif
 
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 9d97810..9587d30 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -316,10 +316,12 @@
 #define H_CPU_CHAR_BRANCH_HINTS_HONORED	(1ull << 58) // IBM bit 5
 #define H_CPU_CHAR_THREAD_RECONFIG_CTRL	(1ull << 57) // IBM bit 6
 #define H_CPU_CHAR_COUNT_CACHE_DISABLED	(1ull << 56) // IBM bit 7
+#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST	(1ull << 54) // IBM bit 9
 
 #define H_CPU_BEHAV_FAVOUR_SECURITY	(1ull << 63) // IBM bit 0
 #define H_CPU_BEHAV_L1D_FLUSH_PR	(1ull << 62) // IBM bit 1
 #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR	(1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FLUSH_COUNT_CACHE	(1ull << 58) // IBM bit 5
 
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c4ced1d..48e8f1f 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -225,6 +225,7 @@
 /* Misc instructions for BPF compiler */
 #define PPC_INST_LBZ			0x88000000
 #define PPC_INST_LD			0xe8000000
+#define PPC_INST_LDX			0x7c00002a
 #define PPC_INST_LHZ			0xa0000000
 #define PPC_INST_LWZ			0x80000000
 #define PPC_INST_LHBRX			0x7c00062c
@@ -232,6 +233,7 @@
 #define PPC_INST_STB			0x98000000
 #define PPC_INST_STH			0xb0000000
 #define PPC_INST_STD			0xf8000000
+#define PPC_INST_STDX			0x7c00012a
 #define PPC_INST_STDU			0xf8000001
 #define PPC_INST_STW			0x90000000
 #define PPC_INST_STWU			0x94000000
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c73750b..bbd35ba 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -437,7 +437,7 @@
 .machine push ;					\
 .machine "power4" ;				\
        lis     scratch,0x60000000@h;		\
-       dcbt    r0,scratch,0b01010;		\
+       dcbt    0,scratch,0b01010;		\
 .machine pop
 
 /*
@@ -780,4 +780,25 @@
 	.long 0x2400004c  /* rfid				*/
 #endif /* !CONFIG_PPC_BOOK3E */
 #endif /*  __ASSEMBLY__ */
+
+/*
+ * Helper macro for exception table entries
+ */
+#define EX_TABLE(_fault, _target)		\
+	stringify_in_c(.section __ex_table,"a";)\
+	stringify_in_c(.balign 4;)		\
+	stringify_in_c(.long (_fault) - . ;)	\
+	stringify_in_c(.long (_target) - . ;)	\
+	stringify_in_c(.previous)
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define BTB_FLUSH(reg)			\
+	lis reg,BUCSR_INIT@h;		\
+	ori reg,reg,BUCSR_INIT@l;	\
+	mtspr SPRN_BUCSR,reg;		\
+	isync;
+#else
+#define BTB_FLUSH(reg)
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
 #endif /* _ASM_POWERPC_PPC_ASM_H */
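
On FSL Book3E, writing BUCSR_INIT to the BUCSR SPR resets the branch predictor state, which is all BTB_FLUSH does. A rough C-level equivalent, assuming the kernel's mtspr()/isync() helpers (sketch only; the real sites must run in assembly on kernel entry, before any kernel branch is predicted):

    static inline void btb_flush_sketch(void)
    {
            mtspr(SPRN_BUCSR, BUCSR_INIT);  /* clear and re-enable predictor */
            isync();                        /* order against later fetches */
    }
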
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 737e012..319ed53 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -41,7 +41,7 @@
 #if defined(CONFIG_PPC_BOOK3E_64)
 #define MSR_64BIT	MSR_CM
 
-#define MSR_		(MSR_ME | MSR_CE)
+#define MSR_		(MSR_ME | MSR_RI | MSR_CE)
 #define MSR_KERNEL	(MSR_ | MSR_64BIT)
 #define MSR_USER32	(MSR_ | MSR_PR | MSR_EE)
 #define MSR_USER64	(MSR_USER32 | MSR_64BIT)
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 44989b2..759597b 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -22,6 +22,7 @@
 
 void setup_stf_barrier(void);
 void do_stf_barrier_fixups(enum stf_barrier_type types);
+void setup_count_cache_flush(void);
 
 static inline void security_ftr_set(unsigned long feature)
 {
@@ -59,6 +60,9 @@
 // Indirect branch prediction cache disabled
 #define SEC_FTR_COUNT_CACHE_DISABLED	0x0000000000000020ull
 
+// bcctr 2,0,0 triggers a hardware assisted count cache flush
+#define SEC_FTR_BCCTR_FLUSH_ASSIST	0x0000000000000800ull
+
 
 // Features indicating need for Spectre/Meltdown mitigations
 
@@ -74,6 +78,9 @@
 // Firmware configuration indicates user favours security over performance
 #define SEC_FTR_FAVOUR_SECURITY		0x0000000000000200ull
 
+// Software required to flush count cache on context switch
+#define SEC_FTR_FLUSH_COUNT_CACHE	0x0000000000000400ull
+
 
 // Features enabled by default
 #define SEC_FTR_DEFAULT \
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 3f160cd..862ebce 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -8,6 +8,7 @@
 
 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
@@ -50,6 +51,26 @@
 
 void setup_rfi_flush(enum l1d_flush_type, bool enable);
 void do_rfi_flush_fixups(enum l1d_flush_type types);
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void setup_barrier_nospec(void);
+#else
+static inline void setup_barrier_nospec(void) { };
+#endif
+void do_barrier_nospec_fixups(bool enable);
+extern bool barrier_nospec_enabled;
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+#else
+static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+#endif
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+void setup_spectre_v2(void);
+#else
+static inline void setup_spectre_v2(void) {};
+#endif
+void do_btb_flush_fixups(void);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b..229c91b 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -90,6 +90,8 @@
 #define topology_sibling_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)		(cpu_to_core_id(cpu))
+
+int dlpar_cpu_readd(int cpu);
 #endif
 #endif
 
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 31913b3..da85215 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -269,6 +269,7 @@
 	__chk_user_ptr(ptr);					\
 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
 		might_fault();					\
+	barrier_nospec();					\
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;			\
 	__gu_err;						\
@@ -280,8 +281,10 @@
 	unsigned long  __gu_val = 0;					\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
 	might_fault();							\
-	if (access_ok(VERIFY_READ, __gu_addr, (size)))			\
+	if (access_ok(VERIFY_READ, __gu_addr, (size))) {		\
+		barrier_nospec();					\
 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	}								\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
 	__gu_err;							\
 })
@@ -292,6 +295,7 @@
 	unsigned long __gu_val;					\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
 	__chk_user_ptr(ptr);					\
+	barrier_nospec();					\
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 	__gu_err;						\
@@ -348,15 +352,19 @@
 
 		switch (n) {
 		case 1:
+			barrier_nospec();
 			__get_user_size(*(u8 *)to, from, 1, ret);
 			break;
 		case 2:
+			barrier_nospec();
 			__get_user_size(*(u16 *)to, from, 2, ret);
 			break;
 		case 4:
+			barrier_nospec();
 			__get_user_size(*(u32 *)to, from, 4, ret);
 			break;
 		case 8:
+			barrier_nospec();
 			__get_user_size(*(u64 *)to, from, 8, ret);
 			break;
 		}
@@ -366,6 +374,7 @@
 
 	check_object_size(to, n, false);
 
+	barrier_nospec();
 	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
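
Taken together, the uaccess changes place the barrier between the user-pointer validity check and the first dependent user access on every path: the constant-size switch, the inlined __get_user variants, and the bulk copy. The shape being enforced, in isolation (a sketch; names are illustrative):

    static unsigned long copy_in_checked(void *dst,
                                         const void __user *uptr,
                                         unsigned long len)
    {
            unsigned long ret = len;        /* bytes not copied */

            if (access_ok(VERIFY_READ, uptr, len)) {
                    barrier_nospec();       /* access_ok() branch must retire */
                    ret = __copy_tofrom_user((__force void __user *)dst,
                                             uptr, len);
            }
            return ret;
    }
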
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1388578..d80fbf0 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -44,9 +44,10 @@
 obj-$(CONFIG_VDSO32)		+= vdso32/
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_ppc970.o cpu_setup_pa6t.o
-obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_power.o security.o
+obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_power.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= mce.o mce_power.o
 obj-$(CONFIG_PPC_BOOK3E_64)	+= exceptions-64e.o idle_book3e.o
+obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3841d74..bdd88f9 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -34,6 +34,7 @@
 #include <asm/ftrace.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
+#include <asm/barrier.h>
 
 /*
  * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
@@ -347,6 +348,15 @@
 	ori	r10,r10,sys_call_table@l
 	slwi	r0,r0,2
 	bge-	66f
+
+	barrier_nospec_asm
+	/*
+	 * Prevent the load of the handler below (based on the user-passed
+	 * system call number) being speculatively executed until the test
+	 * against NR_syscalls and branch to 66f above has
+	 * committed.
+	 */
+
 	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
 	mtlr	r10
 	addi	r9,r1,STACK_FRAME_OVERHEAD
@@ -698,6 +708,9 @@
 	mtcr	r10
 	lwz	r10,_LINK(r11)
 	mtlr	r10
+	/* Clear the exception_marker on the stack to avoid confusing the stack trace */
+	li	r10, 0
+	stw	r10, 8(r11)
 	REST_GPR(10, r11)
 	mtspr	SPRN_SRR1,r9
 	mtspr	SPRN_SRR0,r12
@@ -932,6 +945,9 @@
 	mtcrf	0xFF,r10
 	mtlr	r11
 
+	/* Clear the exception_marker on the stack to avoid confusing the stack trace */
+	li	r10, 0
+	stw	r10, 8(r1)
 	/*
 	 * Once we put values in SRR0 and SRR1, we are in a state
 	 * where exceptions are not recoverable, since taking an
@@ -969,6 +985,9 @@
 	mtlr	r11
 	lwz	r10,_CCR(r1)
 	mtcrf	0xff,r10
+	/* Clear the exception_marker on the stack to avoid confusing the stack trace */
+	li	r10, 0
+	stw	r10, 8(r1)
 	REST_2GPRS(9, r1)
 	.globl exc_exit_restart
 exc_exit_restart:
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index e24ae0f..390ebf4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -26,6 +26,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/thread_info.h>
+#include <asm/code-patching-asm.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
@@ -38,6 +39,7 @@
 #include <asm/context_tracking.h>
 #include <asm/tm.h>
 #include <asm/ppc-opcode.h>
+#include <asm/barrier.h>
 #include <asm/export.h>
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/exception-64s.h>
@@ -78,6 +80,11 @@
 	std	r0,GPR0(r1)
 	std	r10,GPR1(r1)
 	beq	2f			/* if from kernel mode */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+	BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+#endif
 	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
 2:	std	r2,GPR2(r1)
 	std	r3,GPR3(r1)
@@ -180,6 +187,15 @@
 	clrldi	r8,r8,32
 15:
 	slwi	r0,r0,4
+
+	barrier_nospec_asm
+	/*
+	 * Prevent the load of the handler below (based on the user-passed
+	 * system call number) being speculatively executed until the test
+	 * against NR_syscalls and branch to .Lsyscall_enosys above has
+	 * committed.
+	 */
+
 	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
 	mtctr   r12
 	bctrl			/* Call handler */
@@ -473,6 +489,57 @@
 	li	r3,0
 	b	.Lsyscall_exit
 
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#define FLUSH_COUNT_CACHE	\
+1:	nop;			\
+	patch_site 1b, patch__call_flush_count_cache
+
+
+#define BCCTR_FLUSH	.long 0x4c400420
+
+.macro nops number
+	.rept \number
+	nop
+	.endr
+.endm
+
+.balign 32
+.global flush_count_cache
+flush_count_cache:
+	/* Save LR into r9 */
+	mflr	r9
+
+	.rept 64
+	bl	.+4
+	.endr
+	b	1f
+	nops	6
+
+	.balign 32
+	/* Restore LR */
+1:	mtlr	r9
+	li	r9,0x7fff
+	mtctr	r9
+
+	BCCTR_FLUSH
+
+2:	nop
+	patch_site 2b, patch__flush_count_cache_return
+
+	nops	3
+
+	.rept 278
+	.balign 32
+	BCCTR_FLUSH
+	nops	7
+	.endr
+
+	blr
+#else
+#define FLUSH_COUNT_CACHE
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 /*
  * This routine switches between two different tasks.  The process
  * state of one is saved on its kernel stack.  Then the state
@@ -504,6 +571,8 @@
 	std	r23,_CCR(r1)
 	std	r1,KSP(r3)	/* Set old stack pointer */
 
+	FLUSH_COUNT_CACHE
+
 #ifdef CONFIG_SMP
 	/* We need a sync somewhere here to make sure that if the
 	 * previous task gets rescheduled on another CPU, it sees all
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index ca03eb2..423b525 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -295,7 +295,8 @@
 	andi.	r10,r11,MSR_PR;		/* save stack pointer */	    \
 	beq	1f;			/* branch around if supervisor */   \
 	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
-1:	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
+1:	type##_BTB_FLUSH		\
+	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
 	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
 	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */
 
@@ -327,6 +328,30 @@
 #define SPRN_MC_SRR0	SPRN_MCSRR0
 #define SPRN_MC_SRR1	SPRN_MCSRR1
 
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define GEN_BTB_FLUSH			\
+	START_BTB_FLUSH_SECTION		\
+		beq 1f;			\
+		BTB_FLUSH(r10)			\
+		1:		\
+	END_BTB_FLUSH_SECTION
+
+#define CRIT_BTB_FLUSH			\
+	START_BTB_FLUSH_SECTION		\
+		BTB_FLUSH(r10)		\
+	END_BTB_FLUSH_SECTION
+
+#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
+#define MC_BTB_FLUSH CRIT_BTB_FLUSH
+#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
+#else
+#define GEN_BTB_FLUSH
+#define CRIT_BTB_FLUSH
+#define DBG_BTB_FLUSH
+#define MC_BTB_FLUSH
+#define GDBELL_BTB_FLUSH
+#endif
+
 #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)			    \
 	EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
 
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index a620203..7b98c73 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -31,6 +31,16 @@
  */
 #define THREAD_NORMSAVE(offset)	(THREAD_NORMSAVES + (offset * 4))
 
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define BOOKE_CLEAR_BTB(reg)									\
+START_BTB_FLUSH_SECTION								\
+	BTB_FLUSH(reg)									\
+END_BTB_FLUSH_SECTION
+#else
+#define BOOKE_CLEAR_BTB(reg)
+#endif
+
+
 #define NORMAL_EXCEPTION_PROLOG(intno)						     \
 	mtspr	SPRN_SPRG_WSCRATCH0, r10;	/* save one register */	     \
 	mfspr	r10, SPRN_SPRG_THREAD;					     \
@@ -42,6 +52,7 @@
 	andi.	r11, r11, MSR_PR;	/* check whether user or kernel    */\
 	mr	r11, r1;						     \
 	beq	1f;							     \
+	BOOKE_CLEAR_BTB(r11)						\
 	/* if from user, start at top of this thread's kernel stack */       \
 	lwz	r11, THREAD_INFO-THREAD(r10);				     \
 	ALLOC_STACK_FRAME(r11, THREAD_SIZE);				     \
@@ -127,6 +138,7 @@
 	stw	r9,_CCR(r8);		/* save CR on stack		   */\
 	mfspr	r11,exc_level_srr1;	/* check whether user or kernel    */\
 	DO_KVM	BOOKE_INTERRUPT_##intno exc_level_srr1;		             \
+	BOOKE_CLEAR_BTB(r10)						\
 	andi.	r11,r11,MSR_PR;						     \
 	mfspr	r11,SPRN_SPRG_THREAD;	/* if from user, start at top of   */\
 	lwz	r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index bf4c602..60a0aee 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -452,6 +452,13 @@
 	mfcr	r13
 	stw	r13, THREAD_NORMSAVE(3)(r10)
 	DO_KVM	BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
+START_BTB_FLUSH_SECTION
+	mfspr r11, SPRN_SRR1
+	andi. r10,r11,MSR_PR
+	beq 1f
+	BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
 	mfspr	r10, SPRN_DEAR		/* Get faulting address */
 
 	/* If we are faulting a kernel address, we have to use the
@@ -546,6 +553,14 @@
 	mfcr	r13
 	stw	r13, THREAD_NORMSAVE(3)(r10)
 	DO_KVM	BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
+START_BTB_FLUSH_SECTION
+	mfspr r11, SPRN_SRR1
+	andi. r10,r11,MSR_PR
+	beq 1f
+	BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
+
 	mfspr	r10, SPRN_SRR0		/* Get faulting address */
 
 	/* If we are faulting a kernel address, we have to use the
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 30b89d5..3b1c3bb 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -72,7 +72,15 @@
 		do_feature_fixups(powerpc_firmware_features,
 				  (void *)sect->sh_addr,
 				  (void *)sect->sh_addr + sect->sh_size);
-#endif
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+	sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
+	if (sect != NULL)
+		do_barrier_nospec_fixups_range(barrier_nospec_enabled,
+				  (void *)sect->sh_addr,
+				  (void *)sect->sh_addr + sect->sh_size);
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
 
 	sect = find_section(hdr, sechdrs, "__lwsync_fixup");
 	if (sect != NULL)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1c141d5..609f0e8 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -153,7 +153,7 @@
 
 	save_fpu(tsk);
 	msr = tsk->thread.regs->msr;
-	msr &= ~MSR_FP;
+	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
 #ifdef CONFIG_VSX
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr &= ~MSR_VSX;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index adfa63e..4f28296 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -547,6 +547,7 @@
 		/*
 		 * Copy out only the low-order word of vrsave.
 		 */
+		int start, end;
 		union {
 			elf_vrreg_t reg;
 			u32 word;
@@ -555,8 +556,10 @@
 
 		vrsave.word = target->thread.vrsave;
 
+		start = 33 * sizeof(vector128);
+		end = start + sizeof(vrsave);
 		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
-					  33 * sizeof(vector128), -1);
+					  start, end);
 	}
 
 	return ret;
@@ -594,6 +597,7 @@
 		/*
 		 * We use only the first word of vrsave.
 		 */
+		int start, end;
 		union {
 			elf_vrreg_t reg;
 			u32 word;
@@ -602,8 +606,10 @@
 
 		vrsave.word = target->thread.vrsave;
 
+		start = 33 * sizeof(vector128);
+		end = start + sizeof(vrsave);
 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
-					 33 * sizeof(vector128), -1);
+					 start, end);
 		if (!ret)
 			target->thread.vrsave = vrsave.word;
 	}
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 2277df8..f4a98d9 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -4,16 +4,127 @@
 //
 // Copyright 2018, Michael Ellerman, IBM Corporation.
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/seq_buf.h>
 
+#include <asm/asm-prototypes.h>
+#include <asm/code-patching.h>
+#include <asm/debug.h>
 #include <asm/security_features.h>
+#include <asm/setup.h>
 
 
 unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
 
+enum count_cache_flush_type {
+	COUNT_CACHE_FLUSH_NONE	= 0x1,
+	COUNT_CACHE_FLUSH_SW	= 0x2,
+	COUNT_CACHE_FLUSH_HW	= 0x4,
+};
+static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+
+bool barrier_nospec_enabled;
+static bool no_nospec;
+static bool btb_flush_enabled;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+static bool no_spectrev2;
+#endif
+
+static void enable_barrier_nospec(bool enable)
+{
+	barrier_nospec_enabled = enable;
+	do_barrier_nospec_fixups(enable);
+}
+
+void setup_barrier_nospec(void)
+{
+	bool enable;
+
+	/*
+	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
+	 * But there's a good reason not to. The two flags we check below are
+	 * both enabled by default in the kernel, so if the hcall is not
+	 * functional they will be enabled.
+	 * On a system where the host firmware has been updated (so the ori
+	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
+	 * not been updated, we would like to enable the barrier. Dropping the
+	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
+	 * we potentially enable the barrier on systems where the host firmware
+	 * is not updated, but that's harmless as it's a no-op.
+	 */
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
+
+	if (!no_nospec)
+		enable_barrier_nospec(enable);
+}
+
+static int __init handle_nospectre_v1(char *p)
+{
+	no_nospec = true;
+
+	return 0;
+}
+early_param("nospectre_v1", handle_nospectre_v1);
+
+#ifdef CONFIG_DEBUG_FS
+static int barrier_nospec_set(void *data, u64 val)
+{
+	switch (val) {
+	case 0:
+	case 1:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!!val == !!barrier_nospec_enabled)
+		return 0;
+
+	enable_barrier_nospec(!!val);
+
+	return 0;
+}
+
+static int barrier_nospec_get(void *data, u64 *val)
+{
+	*val = barrier_nospec_enabled ? 1 : 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
+			barrier_nospec_get, barrier_nospec_set, "%llu\n");
+
+static __init int barrier_nospec_debugfs_init(void)
+{
+	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
+			    &fops_barrier_nospec);
+	return 0;
+}
+device_initcall(barrier_nospec_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+static int __init handle_nospectre_v2(char *p)
+{
+	no_spectrev2 = true;
+
+	return 0;
+}
+early_param("nospectre_v2", handle_nospectre_v2);
+void setup_spectre_v2(void)
+{
+	if (no_spectrev2)
+		do_btb_flush_fixups();
+	else
+		btb_flush_enabled = true;
+}
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
+#ifdef CONFIG_PPC_BOOK3S_64
 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	bool thread_priv;
@@ -46,25 +157,39 @@
 
 	return sprintf(buf, "Vulnerable\n");
 }
+#endif
 
 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
-		return sprintf(buf, "Not affected\n");
+	struct seq_buf s;
 
-	return sprintf(buf, "Vulnerable\n");
+	seq_buf_init(&s, buf, PAGE_SIZE - 1);
+
+	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
+		if (barrier_nospec_enabled)
+			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
+		else
+			seq_buf_printf(&s, "Vulnerable");
+
+		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
+			seq_buf_printf(&s, ", ori31 speculation barrier enabled");
+
+		seq_buf_printf(&s, "\n");
+	} else
+		seq_buf_printf(&s, "Not affected\n");
+
+	return s.len;
 }
 
 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	bool bcs, ccd, ori;
 	struct seq_buf s;
+	bool bcs, ccd;
 
 	seq_buf_init(&s, buf, PAGE_SIZE - 1);
 
 	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
 	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
-	ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
 
 	if (bcs || ccd) {
 		seq_buf_printf(&s, "Mitigation: ");
@@ -77,17 +202,23 @@
 
 		if (ccd)
 			seq_buf_printf(&s, "Indirect branch cache disabled");
-	} else
-		seq_buf_printf(&s, "Vulnerable");
+	} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+		seq_buf_printf(&s, "Mitigation: Software count cache flush");
 
-	if (ori)
-		seq_buf_printf(&s, ", ori31 speculation barrier enabled");
+		if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+			seq_buf_printf(&s, " (hardware accelerated)");
+	} else if (btb_flush_enabled) {
+		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
+	} else {
+		seq_buf_printf(&s, "Vulnerable");
+	}
 
 	seq_buf_printf(&s, "\n");
 
 	return s.len;
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
 /*
  * Store-forwarding barrier support.
  */
@@ -235,3 +366,71 @@
 }
 device_initcall(stf_barrier_debugfs_init);
 #endif /* CONFIG_DEBUG_FS */
+
+static void toggle_count_cache_flush(bool enable)
+{
+	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+		patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
+		count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+		pr_info("count-cache-flush: software flush disabled.\n");
+		return;
+	}
+
+	patch_branch_site(&patch__call_flush_count_cache,
+			  (u64)&flush_count_cache, BRANCH_SET_LINK);
+
+	if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
+		count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
+		pr_info("count-cache-flush: full software flush sequence enabled.\n");
+		return;
+	}
+
+	patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
+	count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
+	pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
+}
+
+void setup_count_cache_flush(void)
+{
+	toggle_count_cache_flush(true);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int count_cache_flush_set(void *data, u64 val)
+{
+	bool enable;
+
+	if (val == 1)
+		enable = true;
+	else if (val == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	toggle_count_cache_flush(enable);
+
+	return 0;
+}
+
+static int count_cache_flush_get(void *data, u64 *val)
+{
+	if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
+		*val = 0;
+	else
+		*val = 1;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
+			count_cache_flush_set, "%llu\n");
+
+static __init int count_cache_flush_debugfs_init(void)
+{
+	debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
+			    NULL, &fops_count_cache_flush);
+	return 0;
+}
+device_initcall(count_cache_flush_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+#endif /* CONFIG_PPC_BOOK3S_64 */
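
Both mitigations gain runtime debugfs toggles; powerpc_debugfs_root corresponds to the powerpc/ subdirectory of debugfs. A userspace sketch, assuming debugfs is mounted at /sys/kernel/debug (the helper name is hypothetical):

    #include <stdio.h>

    /* Write "0" or "1" to a mitigation toggle file. */
    static int set_toggle(const char *path, int enable)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fprintf(f, "%d\n", enable);
            return fclose(f);
    }

    /* e.g. set_toggle("/sys/kernel/debug/powerpc/count_cache_flush", 1); */
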
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index bf0f712..5e7d70c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -918,6 +918,9 @@
 	if (ppc_md.setup_arch)
 		ppc_md.setup_arch();
 
+	setup_barrier_nospec();
+	setup_spectre_v2();
+
 	paging_init();
 
 	/* Initialize the MMU context management stuff. */
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index d929afa..bdf2f7b 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -746,12 +746,25 @@
 		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
 					   &uc_transact->uc_mcontext))
 			goto badframe;
-	}
-	else
-	/* Fall through, for non-TM restore */
+	} else
 #endif
-	if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
-		goto badframe;
+	{
+		/*
+		 * Fall through, for non-TM restore
+		 *
+		 * Unset MSR[TS] on the thread regs since the MSR from the
+		 * user context does not have MSR[TS] active, and no
+		 * recheckpoint was done, because restore_tm_sigcontexts()
+		 * was not called either.
+		 *
+		 * If it is not unset, the code can RFID to userspace with
+		 * MSR[TS] set but without the CPU in the proper state,
+		 * causing a TM Bad Thing exception.
+		 */
+		current->thread.regs->msr &= ~MSR_TS_MASK;
+		if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+			goto badframe;
+	}
 
 	if (restore_altstack(&uc->uc_stack))
 		goto badframe;
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 988f38d..82d8aae 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -179,7 +179,7 @@
 	sld	r3, r3, r0
 	li	r0, 0
 1:
-	dcbf	r0,r3
+	dcbf	0,r3
 	addi	r3,r3,0x20
 	bdnz	1b
 
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index c16fddb..50d3650 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -153,8 +153,25 @@
 		*(__rfi_flush_fixup)
 		__stop___rfi_flush_fixup = .;
 	}
-#endif
+#endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+	. = ALIGN(8);
+	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
+		__start___barrier_nospec_fixup = .;
+		*(__barrier_nospec_fixup)
+		__stop___barrier_nospec_fixup = .;
+	}
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	. = ALIGN(8);
+	__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
+		__start__btb_flush_fixup = .;
+		*(__btb_flush_fixup)
+		__stop__btb_flush_fixup = .;
+	}
+#endif
 	EXCEPTION_TABLE(0)
 
 	NOTES :kernel :notes
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 81bd8a07..612b7f6 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -75,6 +75,10 @@
 	PPC_LL	r1, VCPU_HOST_STACK(r4)
 	PPC_LL	r2, HOST_R2(r1)
 
+START_BTB_FLUSH_SECTION
+	BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+
 	mfspr	r10, SPRN_PID
 	lwz	r8, VCPU_HOST_PID(r4)
 	PPC_LL	r11, VCPU_SHARED(r4)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 990db69..fa88f64 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -277,6 +277,13 @@
 		vcpu->arch.pwrmgtcr0 = spr_val;
 		break;
 
+	case SPRN_BUCSR:
+		/*
+		 * If we are here, it means that we have already flushed the
+		 * branch predictor, so just return to guest.
+		 */
+		break;
+
 	/* extra exceptions */
 #ifdef CONFIG_SPE_POSSIBLE
 	case SPRN_IVOR32:
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 753d591..c312955 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -14,12 +14,20 @@
 #include <asm/page.h>
 #include <asm/code-patching.h>
 #include <asm/uaccess.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
 
 
 int patch_instruction(unsigned int *addr, unsigned int instr)
 {
 	int err;
 
+	/* Make sure we aren't patching a freed init section */
+	if (*PTRRELOC(&init_mem_is_free) && init_section_contains(addr, 4)) {
+		pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+		return 0;
+	}
+
 	__put_user_size(instr, addr, 4, err);
 	if (err)
 		return err;
@@ -32,6 +40,22 @@
 	return patch_instruction(addr, create_branch(addr, target, flags));
 }
 
+int patch_branch_site(s32 *site, unsigned long target, int flags)
+{
+	unsigned int *addr;
+
+	addr = (unsigned int *)((unsigned long)site + *site);
+	return patch_instruction(addr, create_branch(addr, target, flags));
+}
+
+int patch_instruction_site(s32 *site, unsigned int instr)
+{
+	unsigned int *addr;
+
+	addr = (unsigned int *)((unsigned long)site + *site);
+	return patch_instruction(addr, instr);
+}
+
 unsigned int create_branch(const unsigned int *addr,
 			   unsigned long target, int flags)
 {
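
The new *_site() helpers recover the patch address by adding the self-relative offset stored at the site back to the site's own address, then patch that instruction. Usage mirrors toggle_count_cache_flush() later in this series (sketch; the identifiers are the ones introduced above, kernel context assumed for bool):

    /* Flip the nop at a patch_site into a bl to the flush routine,
     * or restore the nop. */
    static void toggle_flush_call(bool enable)
    {
            if (enable)
                    patch_branch_site(&patch__call_flush_count_cache,
                                      (unsigned long)&flush_count_cache,
                                      BRANCH_SET_LINK);
            else
                    patch_instruction_site(&patch__call_flush_count_cache,
                                           PPC_INST_NOP);
    }
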
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index a84d333..ca5fc8f 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -45,13 +45,13 @@
 .machine push
 .machine "power4"
 	/* setup read stream 0  */
-	dcbt	r0,r4,0b01000  	/* addr from */
-	dcbt	r0,r7,0b01010   /* length and depth from */
+	dcbt	0,r4,0b01000  	/* addr from */
+	dcbt	0,r7,0b01010   /* length and depth from */
 	/* setup write stream 1 */
-	dcbtst	r0,r9,0b01000   /* addr to */
-	dcbtst	r0,r10,0b01010  /* length and depth to */
+	dcbtst	0,r9,0b01000   /* addr to */
+	dcbtst	0,r10,0b01010  /* length and depth to */
 	eieio
-	dcbt	r0,r8,0b01010	/* all streams GO */
+	dcbt	0,r8,0b01010	/* all streams GO */
 .machine pop
 
 #ifdef CONFIG_ALTIVEC
@@ -83,7 +83,7 @@
 	li	r12,112
 
 	.align	5
-1:	lvx	v7,r0,r4
+1:	lvx	v7,0,r4
 	lvx	v6,r4,r6
 	lvx	v5,r4,r7
 	lvx	v4,r4,r8
@@ -92,7 +92,7 @@
 	lvx	v1,r4,r11
 	lvx	v0,r4,r12
 	addi	r4,r4,128
-	stvx	v7,r0,r3
+	stvx	v7,0,r3
 	stvx	v6,r3,r6
 	stvx	v5,r3,r7
 	stvx	v4,r3,r8
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index da0c568..3916948 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -327,13 +327,13 @@
 .machine push
 .machine "power4"
 	/* setup read stream 0 */
-	dcbt	r0,r6,0b01000   /* addr from */
-	dcbt	r0,r7,0b01010   /* length and depth from */
+	dcbt	0,r6,0b01000   /* addr from */
+	dcbt	0,r7,0b01010   /* length and depth from */
 	/* setup write stream 1 */
-	dcbtst	r0,r9,0b01000   /* addr to */
-	dcbtst	r0,r10,0b01010  /* length and depth to */
+	dcbtst	0,r9,0b01000   /* addr to */
+	dcbtst	0,r10,0b01010  /* length and depth to */
 	eieio
-	dcbt	r0,r8,0b01010	/* all streams GO */
+	dcbt	0,r8,0b01010	/* all streams GO */
 .machine pop
 
 	beq	cr1,.Lunwind_stack_nonvmx_copy
@@ -388,26 +388,26 @@
 	li	r11,48
 
 	bf	cr7*4+3,5f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	addi	r4,r4,16
-err3;	stvx	v1,r0,r3
+err3;	stvx	v1,0,r3
 	addi	r3,r3,16
 
 5:	bf	cr7*4+2,6f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 err3;	lvx	v0,r4,r9
 	addi	r4,r4,32
-err3;	stvx	v1,r0,r3
+err3;	stvx	v1,0,r3
 err3;	stvx	v0,r3,r9
 	addi	r3,r3,32
 
 6:	bf	cr7*4+1,7f
-err3;	lvx	v3,r0,r4
+err3;	lvx	v3,0,r4
 err3;	lvx	v2,r4,r9
 err3;	lvx	v1,r4,r10
 err3;	lvx	v0,r4,r11
 	addi	r4,r4,64
-err3;	stvx	v3,r0,r3
+err3;	stvx	v3,0,r3
 err3;	stvx	v2,r3,r9
 err3;	stvx	v1,r3,r10
 err3;	stvx	v0,r3,r11
@@ -433,7 +433,7 @@
 	 */
 	.align	5
 8:
-err4;	lvx	v7,r0,r4
+err4;	lvx	v7,0,r4
 err4;	lvx	v6,r4,r9
 err4;	lvx	v5,r4,r10
 err4;	lvx	v4,r4,r11
@@ -442,7 +442,7 @@
 err4;	lvx	v1,r4,r15
 err4;	lvx	v0,r4,r16
 	addi	r4,r4,128
-err4;	stvx	v7,r0,r3
+err4;	stvx	v7,0,r3
 err4;	stvx	v6,r3,r9
 err4;	stvx	v5,r3,r10
 err4;	stvx	v4,r3,r11
@@ -463,29 +463,29 @@
 	mtocrf	0x01,r6
 
 	bf	cr7*4+1,9f
-err3;	lvx	v3,r0,r4
+err3;	lvx	v3,0,r4
 err3;	lvx	v2,r4,r9
 err3;	lvx	v1,r4,r10
 err3;	lvx	v0,r4,r11
 	addi	r4,r4,64
-err3;	stvx	v3,r0,r3
+err3;	stvx	v3,0,r3
 err3;	stvx	v2,r3,r9
 err3;	stvx	v1,r3,r10
 err3;	stvx	v0,r3,r11
 	addi	r3,r3,64
 
 9:	bf	cr7*4+2,10f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 err3;	lvx	v0,r4,r9
 	addi	r4,r4,32
-err3;	stvx	v1,r0,r3
+err3;	stvx	v1,0,r3
 err3;	stvx	v0,r3,r9
 	addi	r3,r3,32
 
 10:	bf	cr7*4+3,11f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	addi	r4,r4,16
-err3;	stvx	v1,r0,r3
+err3;	stvx	v1,0,r3
 	addi	r3,r3,16
 
 	/* Up to 15B to go */
@@ -565,25 +565,25 @@
 	addi	r4,r4,16
 
 	bf	cr7*4+3,5f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 	addi	r3,r3,16
 	vor	v0,v1,v1
 
 5:	bf	cr7*4+2,6f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 err3;	lvx	v0,r4,r9
 	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 err3;	stvx	v9,r3,r9
 	addi	r3,r3,32
 
 6:	bf	cr7*4+1,7f
-err3;	lvx	v3,r0,r4
+err3;	lvx	v3,0,r4
 	VPERM(v8,v0,v3,v16)
 err3;	lvx	v2,r4,r9
 	VPERM(v9,v3,v2,v16)
@@ -592,7 +592,7 @@
 err3;	lvx	v0,r4,r11
 	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 err3;	stvx	v9,r3,r9
 err3;	stvx	v10,r3,r10
 err3;	stvx	v11,r3,r11
@@ -618,7 +618,7 @@
 	 */
 	.align	5
 8:
-err4;	lvx	v7,r0,r4
+err4;	lvx	v7,0,r4
 	VPERM(v8,v0,v7,v16)
 err4;	lvx	v6,r4,r9
 	VPERM(v9,v7,v6,v16)
@@ -635,7 +635,7 @@
 err4;	lvx	v0,r4,r16
 	VPERM(v15,v1,v0,v16)
 	addi	r4,r4,128
-err4;	stvx	v8,r0,r3
+err4;	stvx	v8,0,r3
 err4;	stvx	v9,r3,r9
 err4;	stvx	v10,r3,r10
 err4;	stvx	v11,r3,r11
@@ -656,7 +656,7 @@
 	mtocrf	0x01,r6
 
 	bf	cr7*4+1,9f
-err3;	lvx	v3,r0,r4
+err3;	lvx	v3,0,r4
 	VPERM(v8,v0,v3,v16)
 err3;	lvx	v2,r4,r9
 	VPERM(v9,v3,v2,v16)
@@ -665,27 +665,27 @@
 err3;	lvx	v0,r4,r11
 	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 err3;	stvx	v9,r3,r9
 err3;	stvx	v10,r3,r10
 err3;	stvx	v11,r3,r11
 	addi	r3,r3,64
 
 9:	bf	cr7*4+2,10f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 err3;	lvx	v0,r4,r9
 	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 err3;	stvx	v9,r3,r9
 	addi	r3,r3,32
 
 10:	bf	cr7*4+3,11f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 	addi	r3,r3,16
 
 	/* Up to 15B to go */
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index cf1398e..e6ed0ec 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -277,8 +277,101 @@
 		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
 						: "unknown");
 }
+
+void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
+{
+	unsigned int instr, *dest;
+	long *start, *end;
+	int i;
+
+	start = fixup_start;
+	end = fixup_end;
+
+	instr = 0x60000000; /* nop */
+
+	if (enable) {
+		pr_info("barrier-nospec: using ORI speculation barrier\n");
+		instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+	}
+
+	for (i = 0; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+		patch_instruction(dest, instr);
+	}
+
+	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+}
+
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void do_barrier_nospec_fixups(bool enable)
+{
+	void *start, *end;
+
+	start = PTRRELOC(&__start___barrier_nospec_fixup);
+	end = PTRRELOC(&__stop___barrier_nospec_fixup);
+
+	do_barrier_nospec_fixups_range(enable, start, end);
+}
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
+{
+	unsigned int instr[2], *dest;
+	long *start, *end;
+	int i;
+
+	start = fixup_start;
+	end = fixup_end;
+
+	instr[0] = PPC_INST_NOP;
+	instr[1] = PPC_INST_NOP;
+
+	if (enable) {
+		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
+		instr[0] = PPC_INST_ISYNC;
+		instr[1] = PPC_INST_SYNC;
+	}
+
+	for (i = 0; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+		patch_instruction(dest, instr[0]);
+		patch_instruction(dest + 1, instr[1]);
+	}
+
+	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+}
+
+static void patch_btb_flush_section(long *curr)
+{
+	unsigned int *start, *end;
+
+	start = (void *)curr + *curr;
+	end = (void *)curr + *(curr + 1);
+	for (; start < end; start++) {
+		pr_devel("patching dest %lx\n", (unsigned long)start);
+		patch_instruction(start, PPC_INST_NOP);
+	}
+}
+
+void do_btb_flush_fixups(void)
+{
+	long *start, *end;
+
+	start = PTRRELOC(&__start__btb_flush_fixup);
+	end = PTRRELOC(&__stop__btb_flush_fixup);
+
+	for (; start < end; start += 2)
+		patch_btb_flush_section(start);
+}
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 {
 	long *start, *end;
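
All of these fixup tables share one encoding: each slot stores the target address minus the slot's own address, so code that may run relocated recovers the destination by simple addition, exactly as the loops above do. In isolation (sketch):

    /* Recover the patch destination from a self-relative table slot. */
    static unsigned int *fixup_target(long *slot)
    {
            return (unsigned int *)((void *)slot + *slot);
    }
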
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 786234f..193909a 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -261,12 +261,12 @@
 
 .machine push
 .machine "power4"
-	dcbt	r0,r6,0b01000
-	dcbt	r0,r7,0b01010
-	dcbtst	r0,r9,0b01000
-	dcbtst	r0,r10,0b01010
+	dcbt	0,r6,0b01000
+	dcbt	0,r7,0b01010
+	dcbtst	0,r9,0b01000
+	dcbtst	0,r10,0b01010
 	eieio
-	dcbt	r0,r8,0b01010	/* GO */
+	dcbt	0,r8,0b01010	/* GO */
 .machine pop
 
 	beq	cr1,.Lunwind_stack_nonvmx_copy
@@ -321,26 +321,26 @@
 	li	r11,48
 
 	bf	cr7*4+3,5f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	addi	r4,r4,16
-	stvx	v1,r0,r3
+	stvx	v1,0,r3
 	addi	r3,r3,16
 
 5:	bf	cr7*4+2,6f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	lvx	v0,r4,r9
 	addi	r4,r4,32
-	stvx	v1,r0,r3
+	stvx	v1,0,r3
 	stvx	v0,r3,r9
 	addi	r3,r3,32
 
 6:	bf	cr7*4+1,7f
-	lvx	v3,r0,r4
+	lvx	v3,0,r4
 	lvx	v2,r4,r9
 	lvx	v1,r4,r10
 	lvx	v0,r4,r11
 	addi	r4,r4,64
-	stvx	v3,r0,r3
+	stvx	v3,0,r3
 	stvx	v2,r3,r9
 	stvx	v1,r3,r10
 	stvx	v0,r3,r11
@@ -366,7 +366,7 @@
 	 */
 	.align	5
 8:
-	lvx	v7,r0,r4
+	lvx	v7,0,r4
 	lvx	v6,r4,r9
 	lvx	v5,r4,r10
 	lvx	v4,r4,r11
@@ -375,7 +375,7 @@
 	lvx	v1,r4,r15
 	lvx	v0,r4,r16
 	addi	r4,r4,128
-	stvx	v7,r0,r3
+	stvx	v7,0,r3
 	stvx	v6,r3,r9
 	stvx	v5,r3,r10
 	stvx	v4,r3,r11
@@ -396,29 +396,29 @@
 	mtocrf	0x01,r6
 
 	bf	cr7*4+1,9f
-	lvx	v3,r0,r4
+	lvx	v3,0,r4
 	lvx	v2,r4,r9
 	lvx	v1,r4,r10
 	lvx	v0,r4,r11
 	addi	r4,r4,64
-	stvx	v3,r0,r3
+	stvx	v3,0,r3
 	stvx	v2,r3,r9
 	stvx	v1,r3,r10
 	stvx	v0,r3,r11
 	addi	r3,r3,64
 
 9:	bf	cr7*4+2,10f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	lvx	v0,r4,r9
 	addi	r4,r4,32
-	stvx	v1,r0,r3
+	stvx	v1,0,r3
 	stvx	v0,r3,r9
 	addi	r3,r3,32
 
 10:	bf	cr7*4+3,11f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	addi	r4,r4,16
-	stvx	v1,r0,r3
+	stvx	v1,0,r3
 	addi	r3,r3,16
 
 	/* Up to 15B to go */
@@ -499,25 +499,25 @@
 	addi	r4,r4,16
 
 	bf	cr7*4+3,5f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	addi	r3,r3,16
 	vor	v0,v1,v1
 
 5:	bf	cr7*4+2,6f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	lvx	v0,r4,r9
 	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	stvx	v9,r3,r9
 	addi	r3,r3,32
 
 6:	bf	cr7*4+1,7f
-	lvx	v3,r0,r4
+	lvx	v3,0,r4
 	VPERM(v8,v0,v3,v16)
 	lvx	v2,r4,r9
 	VPERM(v9,v3,v2,v16)
@@ -526,7 +526,7 @@
 	lvx	v0,r4,r11
 	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	stvx	v9,r3,r9
 	stvx	v10,r3,r10
 	stvx	v11,r3,r11
@@ -552,7 +552,7 @@
 	 */
 	.align	5
 8:
-	lvx	v7,r0,r4
+	lvx	v7,0,r4
 	VPERM(v8,v0,v7,v16)
 	lvx	v6,r4,r9
 	VPERM(v9,v7,v6,v16)
@@ -569,7 +569,7 @@
 	lvx	v0,r4,r16
 	VPERM(v15,v1,v0,v16)
 	addi	r4,r4,128
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	stvx	v9,r3,r9
 	stvx	v10,r3,r10
 	stvx	v11,r3,r11
@@ -590,7 +590,7 @@
 	mtocrf	0x01,r6
 
 	bf	cr7*4+1,9f
-	lvx	v3,r0,r4
+	lvx	v3,0,r4
 	VPERM(v8,v0,v3,v16)
 	lvx	v2,r4,r9
 	VPERM(v9,v3,v2,v16)
@@ -599,27 +599,27 @@
 	lvx	v0,r4,r11
 	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	stvx	v9,r3,r9
 	stvx	v10,r3,r10
 	stvx	v11,r3,r11
 	addi	r3,r3,64
 
 9:	bf	cr7*4+2,10f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	lvx	v0,r4,r9
 	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	stvx	v9,r3,r9
 	addi	r3,r3,32
 
 10:	bf	cr7*4+3,11f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	addi	r3,r3,16
 
 	/* Up to 15B to go */
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index 57ace35..11e6372 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -192,7 +192,7 @@
 	mtctr	r6
 	mr	r8,r3
 14:
-err1;	dcbz	r0,r3
+err1;	dcbz	0,r3
 	add	r3,r3,r9
 	bdnz	14b
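
The many r0 -> 0 conversions in the assembly above are semantic clean-ups rather than behaviour changes: in PowerPC load/store and cache-op encodings, an RA field of 0 means the literal value 0, not the contents of r0, so spelling it "r0" was misleading (and newer assemblers reject it for these operands). The addressing rule being relied on, as a C sketch:

    /* X-form effective-address rule: RA == 0 reads as zero. */
    static unsigned long effective_addr(unsigned int ra, unsigned int rb,
                                        const unsigned long gpr[32])
    {
            return (ra == 0 ? 0 : gpr[ra]) + gpr[rb];
    }
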
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5f84433..1e93dbc 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -62,6 +62,7 @@
 #endif
 
 unsigned long long memory_limit;
+bool init_mem_is_free;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -396,6 +397,7 @@
 void free_initmem(void)
 {
 	ppc_md.progress = ppc_printk_progress;
+	init_mem_is_free = true;
 	free_initmem_default(POISON_FREE_INITMEM);
 }
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0ef83c2..9cad2ed 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1540,13 +1540,6 @@
 
 #ifdef CONFIG_SMP
 
-static void stage_topology_update(int core_id)
-{
-	cpumask_or(&cpu_associativity_changes_mask,
-		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
-	reset_topology_timer();
-}
-
 static int dt_update_callback(struct notifier_block *nb,
 				unsigned long action, void *data)
 {
@@ -1559,7 +1552,7 @@
 		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
 			u32 core_id;
 			of_property_read_u32(update->dn, "reg", &core_id);
-			stage_topology_update(core_id);
+			rc = dlpar_cpu_readd(core_id);
 			rc = NOTIFY_OK;
 		}
 		break;
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index eb82d78..b7e9c09 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -69,6 +69,13 @@
 	std	r15,EX_TLB_R15(r12)
 	std	r10,EX_TLB_CR(r12)
 #ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+	mfspr r11, SPRN_SRR1
+	andi. r10,r11,MSR_PR
+	beq 1f
+	BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
 	std	r7,EX_TLB_R7(r12)
 #endif
 	TLB_MISS_PROLOG_STATS
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 89f7007..7b1d172 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -51,6 +51,8 @@
 #define PPC_LIS(r, i)		PPC_ADDIS(r, 0, i)
 #define PPC_STD(r, base, i)	EMIT(PPC_INST_STD | ___PPC_RS(r) |	      \
 				     ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STDX(r, base, b)	EMIT(PPC_INST_STDX | ___PPC_RS(r) |	      \
+				     ___PPC_RA(base) | ___PPC_RB(b))
 #define PPC_STDU(r, base, i)	EMIT(PPC_INST_STDU | ___PPC_RS(r) |	      \
 				     ___PPC_RA(base) | ((i) & 0xfffc))
 #define PPC_STW(r, base, i)	EMIT(PPC_INST_STW | ___PPC_RS(r) |	      \
@@ -65,7 +67,9 @@
 #define PPC_LBZ(r, base, i)	EMIT(PPC_INST_LBZ | ___PPC_RT(r) |	      \
 				     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LD(r, base, i)	EMIT(PPC_INST_LD | ___PPC_RT(r) |	      \
-				     ___PPC_RA(base) | IMM_L(i))
+				     ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_LDX(r, base, b)	EMIT(PPC_INST_LDX | ___PPC_RT(r) |	      \
+				     ___PPC_RA(base) | ___PPC_RB(b))
 #define PPC_LWZ(r, base, i)	EMIT(PPC_INST_LWZ | ___PPC_RT(r) |	      \
 				     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LHZ(r, base, i)	EMIT(PPC_INST_LHZ | ___PPC_RT(r) |	      \
@@ -85,17 +89,6 @@
 					___PPC_RA(a) | ___PPC_RB(b))
 #define PPC_BPF_STDCX(s, a, b)	EMIT(PPC_INST_STDCX | ___PPC_RS(s) |	      \
 					___PPC_RA(a) | ___PPC_RB(b))
-
-#ifdef CONFIG_PPC64
-#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
-#else
-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
-#endif
-
 #define PPC_CMPWI(a, i)		EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPDI(a, i)		EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPW(a, b)		EMIT(PPC_INST_CMPW | ___PPC_RA(a) |	      \
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
index a8cd7e2..81a9045 100644
--- a/arch/powerpc/net/bpf_jit32.h
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -122,6 +122,10 @@
 #define PPC_NTOHS_OFFS(r, base, i)	PPC_LHZ_OFFS(r, base, i)
 #endif
 
+#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
+
 #define SEEN_DATAREF 0x10000 /* might call external helpers */
 #define SEEN_XREG    0x20000 /* X reg is used */
 #define SEEN_MEM     0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 62fa758..bb944b6 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -86,6 +86,26 @@
 			(imm >= SKF_LL_OFF ? func##_negative_offset : func) :	\
 			func##_positive_offset)
 
+/*
+ * WARNING: These can use TMP_REG_2 if the offset is not at a word boundary,
+ * so ensure that it isn't in use already.
+ */
+#define PPC_BPF_LL(r, base, i) do {					      \
+				if ((i) % 4) {				      \
+					PPC_LI(b2p[TMP_REG_2], (i));	      \
+					PPC_LDX(r, base, b2p[TMP_REG_2]);     \
+				} else					      \
+					PPC_LD(r, base, i);		      \
+				} while(0)
+#define PPC_BPF_STL(r, base, i) do {					      \
+				if ((i) % 4) {				      \
+					PPC_LI(b2p[TMP_REG_2], (i));	      \
+					PPC_STDX(r, base, b2p[TMP_REG_2]);    \
+				} else					      \
+					PPC_STD(r, base, i);		      \
+				} while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
+
 #define SEEN_FUNC	0x1000 /* might call external helpers */
 #define SEEN_STACK	0x2000 /* uses BPF stack */
 #define SEEN_SKB	0x4000 /* uses sk_buff */
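
PPC_BPF_LL/PPC_BPF_STL exist because ld/std are DS-form instructions: the low two bits of the displacement field are part of the opcode, so only offsets that are multiples of 4 are encodable. Other offsets are routed through the indexed LDX/STDX forms with the offset materialized in TMP_REG_2. The encodability test, in isolation (sketch):

    #include <stdbool.h>

    /* Can this displacement be encoded in a DS-form ld/std? */
    static bool ds_form_ok(int off)
    {
            return (off & 3) == 0 && off >= -32768 && off <= 32764;
    }
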
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index bdbbc32..e7d78f9 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -265,7 +265,7 @@
 	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 	 *   goto out;
 	 */
-	PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
 	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
 	PPC_BCC(COND_GT, out);
 
@@ -278,7 +278,7 @@
 	/* prog = array->ptrs[index]; */
 	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
 	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
-	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
+	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
 
 	/*
 	 * if (prog == NULL)
@@ -288,7 +288,7 @@
 	PPC_BCC(COND_EQ, out);
 
 	/* goto *(prog->bpf_func + prologue_size); */
-	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
+	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
 #ifdef PPC64_ELF_ABI_v1
 	/* skip past the function descriptor */
 	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
@@ -620,7 +620,7 @@
 				 * the instructions generated will remain the
 				 * same across all passes
 				 */
-				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
+				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
 				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
 				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
 				break;
@@ -676,7 +676,7 @@
 				PPC_LI32(b2p[TMP_REG_1], imm);
 				src_reg = b2p[TMP_REG_1];
 			}
-			PPC_STD(src_reg, dst_reg, off);
+			PPC_BPF_STL(src_reg, dst_reg, off);
 			break;
 
 		/*
@@ -723,7 +723,7 @@
 			break;
 		/* dst = *(u64 *)(ul) (src + off) */
 		case BPF_LDX | BPF_MEM | BPF_DW:
-			PPC_LD(dst_reg, src_reg, off);
+			PPC_BPF_LL(dst_reg, src_reg, off);
 			break;
 
 		/*
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index 3d1ecd2..8137f77 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -26,13 +26,13 @@
 #define SS_MSR		0x74
 #define SS_SDR1		0x78
 #define SS_LR		0x7c
-#define SS_SPRG		0x80 /* 4 SPRGs */
-#define SS_DBAT		0x90 /* 8 DBATs */
-#define SS_IBAT		0xd0 /* 8 IBATs */
-#define SS_TB		0x110
-#define SS_CR		0x118
-#define SS_GPREG	0x11c /* r12-r31 */
-#define STATE_SAVE_SIZE 0x16c
+#define SS_SPRG		0x80 /* 8 SPRGs */
+#define SS_DBAT		0xa0 /* 8 DBATs */
+#define SS_IBAT		0xe0 /* 8 IBATs */
+#define SS_TB		0x120
+#define SS_CR		0x128
+#define SS_GPREG	0x12c /* r12-r31 */
+#define STATE_SAVE_SIZE 0x17c
 
 	.section .data
 	.align	5
@@ -103,6 +103,16 @@
 	stw	r7, SS_SPRG+12(r3)
 	stw	r8, SS_SDR1(r3)
 
+	mfspr	r4, SPRN_SPRG4
+	mfspr	r5, SPRN_SPRG5
+	mfspr	r6, SPRN_SPRG6
+	mfspr	r7, SPRN_SPRG7
+
+	stw	r4, SS_SPRG+16(r3)
+	stw	r5, SS_SPRG+20(r3)
+	stw	r6, SS_SPRG+24(r3)
+	stw	r7, SS_SPRG+28(r3)
+
 	mfspr	r4, SPRN_DBAT0U
 	mfspr	r5, SPRN_DBAT0L
 	mfspr	r6, SPRN_DBAT1U
@@ -493,6 +503,16 @@
 	mtspr	SPRN_IBAT7U, r6
 	mtspr	SPRN_IBAT7L, r7
 
+	lwz	r4, SS_SPRG+16(r3)
+	lwz	r5, SS_SPRG+20(r3)
+	lwz	r6, SS_SPRG+24(r3)
+	lwz	r7, SS_SPRG+28(r3)
+
+	mtspr	SPRN_SPRG4, r4
+	mtspr	SPRN_SPRG5, r5
+	mtspr	SPRN_SPRG6, r6
+	mtspr	SPRN_SPRG7, r7
+
 	lwz	r4, SS_SPRG+0(r3)
 	lwz	r5, SS_SPRG+4(r3)
 	lwz	r6, SS_SPRG+8(r3)
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 88301e5..5d8e8b6 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -22,6 +22,7 @@
 
 #include <linux/cpufreq.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/module.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
@@ -48,7 +49,7 @@
 	cpu = info->policy->cpu;
 	busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
 
-	CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1);
+	info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1);
 	pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
 			cpu, busy_spus, info->busy_spus);
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 460f5f3..1149c34 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -24,6 +24,7 @@
 
 #include <linux/errno.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/sched/rt.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -986,9 +987,9 @@
 	unsigned long active_tasks; /* fixed-point */
 
 	active_tasks = count_active_contexts() * FIXED_1;
-	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
-	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
-	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
+	spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks);
+	spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks);
+	spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks);
 }
 
 static void spusched_wake(unsigned long data)
@@ -1070,9 +1071,6 @@
 	}
 }
 
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-
 static int show_spu_loadavg(struct seq_file *s, void *private)
 {
 	int a, b, c;
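
The CALC_LOAD macro call sites become calls to the calc_load() helper from <linux/sched/loadavg.h>; the fixed-point exponential decay is unchanged. Roughly (a sketch of the helper's behaviour, not its exact kernel source; FIXED_1 and the exp constants are FSHIFT-scaled):

    static unsigned long calc_load_sketch(unsigned long load,
                                          unsigned long exp,
                                          unsigned long active)
    {
            return (load * exp + active * (FIXED_1 - exp)) / FIXED_1;
    }
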
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 3fd683e..2914529 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -104,6 +104,10 @@
 	/* MEM2 64MB@0x10000000 */
 	delta = wii_hole_start + wii_hole_size;
 	size = top - delta;
+
+	if (__map_without_bats)
+		return delta;
+
 	for (bl = 128<<10; bl < max_size; bl <<= 1) {
 		if (bl * 2 > size)
 			break;
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index 39d6ff9..d10bad1 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -98,7 +98,7 @@
 }
 
 static struct bin_attribute opal_msglog_attr = {
-	.attr = {.name = "msglog", .mode = 0444},
+	.attr = {.name = "msglog", .mode = 0400},
 	.read = opal_msglog_read
 };
 
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 17203ab..365e2b6 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -77,6 +77,12 @@
 	if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
 		security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
 
+	if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
+		security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+
+	if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np))
+		security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+
 	/*
 	 * The features below are enabled by default, so we instead look to see
 	 * if firmware has *disabled* them, and clear them if so.
@@ -123,6 +129,7 @@
 		  security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
 
 	setup_rfi_flush(type, enable);
+	setup_count_cache_flush();
 }
 
 static void __init pnv_setup_arch(void)
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a1b63e0..7a2beed 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -785,6 +785,25 @@
 	return rc;
 }
 
+int dlpar_cpu_readd(int cpu)
+{
+	struct device_node *dn;
+	struct device *dev;
+	u32 drc_index;
+	int rc;
+
+	dev = get_cpu_device(cpu);
+	dn = dev->of_node;
+
+	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
+
+	rc = dlpar_cpu_remove_by_index(drc_index);
+	if (!rc)
+		rc = dlpar_cpu_add(drc_index);
+
+	return rc;
+}
+
 int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
 {
 	u32 count, drc_index;
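Note that dlpar_cpu_readd() above assigns the return value of of_property_read_u32() to rc but never tests it, so a failed lookup would feed an uninitialized drc_index to dlpar_cpu_remove_by_index(). A defensive variant of that step (a sketch; not what the patch does) would bail out early:

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc)
		return rc;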
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 91ade77..adb09ab 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -475,6 +475,12 @@
 	if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
 		security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
 
+	if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST)
+		security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+
+	if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE)
+		security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+
 	/*
 	 * The features below are enabled by default, so we instead look to see
 	 * if firmware has *disabled* them, and clear them if so.
@@ -525,6 +531,7 @@
 		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
 
 	setup_rfi_flush(types, enable);
+	setup_count_cache_flush();
 }
 
 static void __init pSeries_setup_arch(void)
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 08b9e94..731dc7e 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -17,15 +17,12 @@
 #include <linux/kernel_stat.h>
 #include <linux/netdevice.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <asm/appldata.h>
 #include <asm/smp.h>
 
 #include "appldata.h"
 
-
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-
 /*
  * OS data
  *
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 8d665f1..f0fe566 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -215,11 +215,14 @@
 
 /*
  * Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
  */
-#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x1fffUL)
 #define MMAP_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ff80UL)
 #define MMAP_ALIGN_MASK	(is_compat_task() ? 0 : 0x7fUL)
 #define STACK_RND_MASK	MMAP_RND_MASK
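Both masks count 4KB pages, so the new BRK_RND_MASK matches the updated comment: 0x1fff + 1 pages is 32MB of heap randomization for 64-bit (down from 1GB with the old 0x3ffff) and 0x7ff + 1 pages is 8MB for 31-bit. A quick standalone check of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page = 4096;
		printf("64-bit brk: old %lu MB, new %lu MB\n",
		       ((0x3ffffUL + 1) * page) >> 20,  /* 1024 MB = 1 GB */
		       ((0x1fffUL + 1) * page) >> 20);  /* 32 MB */
		printf("31-bit brk: %lu MB\n",
		       ((0x7ffUL + 1) * page) >> 20);   /* 8 MB */
		return 0;
	}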
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index 1fb6d57..fd00566 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -180,10 +180,10 @@
 
 struct sh_clk_ops;
 
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
 {
 }
 
-void __init plat_irq_setup(void)
+void __init __weak plat_irq_setup(void)
 {
 }
diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c
index 49bace4..c6d9604 100644
--- a/arch/sh/drivers/heartbeat.c
+++ b/arch/sh/drivers/heartbeat.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/timer.h>
 #include <linux/io.h>
 #include <linux/slab.h>
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
index 3ae36f3..44a3ed9 100644
--- a/arch/sparc/kernel/led.c
+++ b/arch/sparc/kernel/led.c
@@ -8,6 +8,7 @@
 #include <linux/jiffies.h>
 #include <linux/timer.h>
 #include <linux/uaccess.h>
+#include <linux/sched/loadavg.h>
 
 #include <asm/auxio.h>
 
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 4714061..0ae73ee 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -112,7 +112,7 @@
 	if (!printk_ratelimit())
 		return;
 
-	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
 	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 	       tsk->comm, task_pid_nr(tsk), address,
 	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 643c149..8c85aaf 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -152,7 +152,7 @@
 	if (!printk_ratelimit())
 		return;
 
-	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
 	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 	       tsk->comm, task_pid_nr(tsk), address,
 	       (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index ad8f206..68cf65f 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -149,7 +149,7 @@
 	if (!printk_ratelimit())
 		return;
 
-	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
+	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %x",
 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 		tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
 		(void *)UPT_IP(regs), (void *)UPT_SP(regs),
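These printk conversions are needed because %p hashes pointer values before printing (to avoid leaking kernel addresses into logs), while %px emits the raw address, which is what makes a segfault report actionable. In kernel context the difference looks like this (illustration only; ptr is a placeholder):

	void *ptr = regs;               /* placeholder pointer */
	pr_info("hashed: %p\n", ptr);   /* prints a per-boot hashed value */
	pr_info("raw:    %px\n", ptr);  /* prints the actual address */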
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index eb431d8..5c9b390 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -938,13 +938,7 @@
 	  approximately eight kilobytes to the kernel image.
 
 config SCHED_SMT
-	bool "SMT (Hyperthreading) scheduler support"
-	depends on SMP
-	---help---
-	  SMT scheduler support improves the CPU scheduler's decision making
-	  when dealing with Intel Pentium 4 chips with HyperThreading at a
-	  cost of slightly increased overhead in some places. If unsure say
-	  N here.
+	def_bool y if SMP
 
 config SCHED_MC
 	def_bool y
@@ -2052,14 +2046,8 @@
 	   If unsure, leave at the default value.
 
 config HOTPLUG_CPU
-	bool "Support for hot-pluggable CPUs"
+	def_bool y
 	depends on SMP
-	---help---
-	  Say Y here to allow turning CPUs off and on. CPUs can be
-	  controlled through /sys/devices/system/cpu.
-	  ( Note: power management support will enable this option
-	    automatically on SMP systems. )
-	  Say N if you want to disable CPU hotplug.
 
 config BOOTPARAM_HOTPLUG_CPU0
 	bool "Set default setting of cpu0_hotpluggable"
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 3b7156f..3b16935 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -100,7 +100,7 @@
 AFLAGS_header.o += -I$(objtree)/$(obj)
 $(obj)/header.o: $(obj)/zoffset.h
 
-LDFLAGS_setup.elf	:= -T
+LDFLAGS_setup.elf	:= -m elf_i386 -T
 $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
 	$(call if_changed,ld)
 
diff --git a/arch/x86/configs/i386_ranchu_defconfig b/arch/x86/configs/i386_ranchu_defconfig
index c679e2d..f691e2e 100644
--- a/arch/x86/configs/i386_ranchu_defconfig
+++ b/arch/x86/configs/i386_ranchu_defconfig
@@ -253,7 +253,6 @@
 CONFIG_TABLET_USB_KBTAB=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_INPUT_MISC=y
-CONFIG_INPUT_KEYCHORD=y
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_GPIO=y
 # CONFIG_SERIO is not set
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index 4da2b7e..df87d30 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -9,18 +9,22 @@
 CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
 CONFIG_NAMESPACES=y
+CONFIG_SCHED_TUNE=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_LZ4 is not set
 CONFIG_KALLSYMS_ALL=y
@@ -85,6 +89,7 @@
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
 CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -92,6 +97,7 @@
 CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_MULTIPATH=y
 CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPGRE_DEMUX=y
 CONFIG_IP_MROUTE=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
@@ -152,6 +158,7 @@
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_HELPER=y
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
 CONFIG_NETFILTER_XT_MATCH_LENGTH=y
 CONFIG_NETFILTER_XT_MATCH_LIMIT=y
 CONFIG_NETFILTER_XT_MATCH_MAC=y
@@ -192,9 +199,11 @@
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
+CONFIG_L2TP=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_U32=y
 CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
@@ -206,7 +215,6 @@
 CONFIG_MAC80211=y
 CONFIG_RFKILL=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
 CONFIG_DEBUG_DEVRES=y
 CONFIG_OF=y
 CONFIG_OF_UNITTEST=y
@@ -234,6 +242,7 @@
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
 CONFIG_DM_ANDROID_VERITY=y
+CONFIG_DM_BOW=y
 CONFIG_NETDEVICES=y
 CONFIG_NETCONSOLE=y
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -244,8 +253,8 @@
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
-CONFIG_PPPOLAC=y
-CONFIG_PPPOPNS=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
 CONFIG_USB_RTL8152=y
 CONFIG_USB_USBNET=y
 # CONFIG_USB_NET_AX8817X is not set
@@ -287,7 +296,6 @@
 CONFIG_TABLET_USB_HANWANG=y
 CONFIG_TABLET_USB_KBTAB=y
 CONFIG_INPUT_MISC=y
-CONFIG_INPUT_KEYCHORD=y
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_GPIO=y
 # CONFIG_SERIO_I8042 is not set
@@ -435,6 +443,7 @@
 CONFIG_QFMT_V2=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_PROC_KCORE=y
@@ -482,6 +491,7 @@
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_ADIANTUM=y
 CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
 CONFIG_CRYPTO_LZ4=y
 CONFIG_CRYPTO_ZSTD=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
diff --git a/arch/x86/configs/x86_64_ranchu_defconfig b/arch/x86/configs/x86_64_ranchu_defconfig
index f094365..cafa3df 100644
--- a/arch/x86/configs/x86_64_ranchu_defconfig
+++ b/arch/x86/configs/x86_64_ranchu_defconfig
@@ -250,7 +250,6 @@
 CONFIG_TABLET_USB_KBTAB=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_INPUT_MISC=y
-CONFIG_INPUT_KEYCHORD=y
 CONFIG_INPUT_UINPUT=y
 CONFIG_INPUT_GPIO=y
 # CONFIG_SERIO is not set
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index cd4df93..7bbfe7d 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -76,15 +76,14 @@
 	return 0;
 }
 
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
 {
 	if (irq_fpu_usable()) {
 		kernel_fpu_begin();
-		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
+		*(__u16 *)out = crc_t10dif_pcl(crc, data, len);
 		kernel_fpu_end();
 	} else
-		*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+		*(__u16 *)out = crc_t10dif_generic(crc, data, len);
 	return 0;
 }
 
@@ -93,15 +92,13 @@
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	return __chksum_finup(&ctx->crc, data, len, out);
+	return __chksum_finup(ctx->crc, data, len, out);
 }
 
 static int chksum_digest(struct shash_desc *desc, const u8 *data,
 			 unsigned int length, u8 *out)
 {
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
+	return __chksum_finup(0, data, length, out);
 }
 
 static struct shash_alg alg = {
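With this change the digest path seeds the CRC with 0 directly rather than reading a context that digest never initializes, and __chksum_finup() can take the seed by value. The equivalent computation via the generic helper (a sketch; crc_t10dif_generic() as used in the fallback branch above):

	/* one-shot digest: the seed is always 0 */
	__u16 crc = crc_t10dif_generic(0, data, len);

	/* update + finup: continue from the accumulated context value */
	__u16 crc2 = crc_t10dif_generic(ctx->crc, data, len);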
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
index eff2f41..ec234c4 100644
--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
+++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
@@ -321,6 +321,12 @@
 	vpaddq		t2,t1,t1
 	vmovq		t1x,d4
 
+	# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+	# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+	# amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+	# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+	# integers.  It's true in a single-block implementation, but not here.
+
 	# d1 += d0 >> 26
 	mov		d0,%rax
 	shr		$26,%rax
@@ -359,16 +365,16 @@
 	# h0 += (d4 >> 26) * 5
 	mov		d4,%rax
 	shr		$26,%rax
-	lea		(%eax,%eax,4),%eax
-	add		%eax,%ebx
+	lea		(%rax,%rax,4),%rax
+	add		%rax,%rbx
 	# h4 = d4 & 0x3ffffff
 	mov		d4,%rax
 	and		$0x3ffffff,%eax
 	mov		%eax,h4
 
 	# h1 += h0 >> 26
-	mov		%ebx,%eax
-	shr		$26,%eax
+	mov		%rbx,%rax
+	shr		$26,%rax
 	add		%eax,h1
 	# h0 = h0 & 0x3ffffff
 	andl		$0x3ffffff,%ebx
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
index 338c748..639d976 100644
--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
+++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
@@ -251,16 +251,16 @@
 	# h0 += (d4 >> 26) * 5
 	mov		d4,%rax
 	shr		$26,%rax
-	lea		(%eax,%eax,4),%eax
-	add		%eax,%ebx
+	lea		(%rax,%rax,4),%rax
+	add		%rax,%rbx
 	# h4 = d4 & 0x3ffffff
 	mov		d4,%rax
 	and		$0x3ffffff,%eax
 	mov		%eax,h4
 
 	# h1 += h0 >> 26
-	mov		%ebx,%eax
-	shr		$26,%eax
+	mov		%rbx,%rax
+	shr		$26,%rax
 	add		%eax,h1
 	# h0 = h0 & 0x3ffffff
 	andl		$0x3ffffff,%ebx
@@ -518,6 +518,12 @@
 	paddq		t2,t1
 	movq		t1,d4
 
+	# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+	# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+	# amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+	# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+	# integers.  It's true in a single-block implementation, but not here.
+
 	# d1 += d0 >> 26
 	mov		d0,%rax
 	shr		$26,%rax
@@ -556,16 +562,16 @@
 	# h0 += (d4 >> 26) * 5
 	mov		d4,%rax
 	shr		$26,%rax
-	lea		(%eax,%eax,4),%eax
-	add		%eax,%ebx
+	lea		(%rax,%rax,4),%rax
+	add		%rax,%rbx
 	# h4 = d4 & 0x3ffffff
 	mov		d4,%rax
 	and		$0x3ffffff,%eax
 	mov		%eax,h4
 
 	# h1 += h0 >> 26
-	mov		%ebx,%eax
-	shr		$26,%eax
+	mov		%rbx,%rax
+	shr		$26,%rax
 	add		%eax,h1
 	# h0 = h0 & 0x3ffffff
 	andl		$0x3ffffff,%ebx
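The register fixes in both poly1305 files address the same bug: in the 4-block SIMD path the limb accumulators d0-d4 are far larger than in the single-block case, so the carries, in particular (d4 >> 26) * 5, can exceed 32 bits, and computing them in %eax/%ebx silently truncated them. The same carry chain with explicitly 64-bit intermediates, as a standalone C sketch (not the kernel's code):

	#include <stdint.h>

	/* Partial reduction mod 2^130 - 5 with 64-bit carries throughout. */
	static void poly1305_partial_reduce(uint64_t d[5], uint32_t h[5])
	{
		uint64_t c;

		d[1] += d[0] >> 26;  h[0] = d[0] & 0x3ffffff;
		d[2] += d[1] >> 26;  h[1] = d[1] & 0x3ffffff;
		d[3] += d[2] >> 26;  h[2] = d[2] & 0x3ffffff;
		d[4] += d[3] >> 26;  h[3] = d[3] & 0x3ffffff;
		c = (d[4] >> 26) * 5;         /* may exceed 2^32 here */
		h[4] = d[4] & 0x3ffffff;
		c += h[0];
		h[1] += (uint32_t)(c >> 26);  /* final carry is small */
		h[0] = c & 0x3ffffff;
	}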
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index b83eafa..ed319b0 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -29,6 +29,7 @@
 #include <asm/vdso.h>
 #include <asm/uaccess.h>
 #include <asm/cpufeature.h>
+#include <asm/nospec-branch.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
@@ -209,6 +210,8 @@
 #endif
 
 	user_enter_irqoff();
+
+	mds_user_clear_cpu_buffers();
 }
 
 #define SYSCALL_EXIT_WORK_FLAGS				\
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index a76dc73..1cf1676 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -219,6 +219,7 @@
 	pushl	%ebx
 	pushl	%edi
 	pushl	%esi
+	pushfl
 
 	/* switch stack */
 	movl	%esp, TASK_threadsp(%eax)
@@ -241,6 +242,7 @@
 #endif
 
 	/* restore callee-saved registers */
+	popfl
 	popl	%esi
 	popl	%edi
 	popl	%ebx
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d764992..50924c0 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -313,6 +313,7 @@
 	pushq	%r13
 	pushq	%r14
 	pushq	%r15
+	pushfq
 
 	/* switch stack */
 	movq	%rsp, TASK_threadsp(%rdi)
@@ -335,6 +336,7 @@
 #endif
 
 	/* restore callee-saved registers */
+	popfq
 	popq	%r15
 	popq	%r14
 	popq	%r13
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 51a858e..0d3ebdf 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -47,10 +47,8 @@
 
 export CPPFLAGS_vdso.lds += -P -C
 
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
-			-Wl,--no-undefined \
-			-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
-			$(DISABLE_LTO)
+VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
+			-z max-page-size=4096
 
 $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
 	$(call if_changed,vdso)
@@ -96,10 +94,8 @@
 #
 
 CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
-			   -Wl,-soname=linux-vdso.so.1 \
-			   -Wl,-z,max-page-size=4096 \
-			   -Wl,-z,common-page-size=4096
+VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
+			   -z max-page-size=4096
 
 # 64-bit objects to re-brand as x32
 vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
@@ -127,7 +123,7 @@
 	$(call if_changed,vdso)
 
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
 
 # This makes sure the $(obj) subdirectory exists even though vdso32/
 # is not a kbuild sub-make subdirectory.
@@ -165,14 +161,14 @@
 # The DSO images are built using a special linker script.
 #
 quiet_cmd_vdso = VDSO    $@
-      cmd_vdso = $(CC) -nostdlib -o $@ \
+      cmd_vdso = $(LD) -nostdlib -o $@ \
 		       $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-		       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+		       -T $(filter %.lds,$^) $(filter %.o,$^) && \
 		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
-	$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) \
-	$(filter --target=% --gcc-toolchain=%,$(KBUILD_CFLAGS))
+VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
+	$(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
+	-Bsymbolic
 GCOV_PROFILE := n
 
 #
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index afb222b..00b56cc 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -112,23 +112,144 @@
  },
 };
 
+static __initconst const u64 amd_hw_cache_event_ids_f17h
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
+		[C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
+		[C(RESULT_MISS)]   = 0,
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches  */
+		[C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses   */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
+		[C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
+		[C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr.      */
+		[C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI    */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+},
+[C(NODE)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = 0,
+		[C(RESULT_MISS)]   = 0,
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = -1,
+		[C(RESULT_MISS)]   = -1,
+	},
+},
+};
+
 /*
- * AMD Performance Monitor K7 and later.
+ * AMD Performance Monitor K7 and later, up to and including Family 16h:
  */
 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
-  [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
-  [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]		= 0x077d,
-  [PERF_COUNT_HW_CACHE_MISSES]			= 0x077e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
-  [PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
-  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
-  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
+	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
+};
+
+/*
+ * AMD Performance Monitor Family 17h and later:
+ */
+static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
 };
 
 static u64 amd_pmu_event_map(int hw_event)
 {
+	if (boot_cpu_data.x86 >= 0x17)
+		return amd_f17h_perfmon_event_map[hw_event];
+
 	return amd_perfmon_event_map[hw_event];
 }
 
@@ -714,9 +835,10 @@
 		x86_pmu.amd_nb_constraints = 0;
 	}
 
-	/* Events are common for all AMDs */
-	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
-	       sizeof(hw_cache_event_ids));
+	if (boot_cpu_data.x86 >= 0x17)
+		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
+	else
+		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 
 	return 0;
 }
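With the Family 17h tables wired up, a generic event such as PERF_COUNT_HW_CACHE_REFERENCES now resolves to the Zen-specific code (0xff60) in amd_pmu_event_map() instead of the pre-Zen 0x077d. A userspace sketch that exercises the mapping through the usual syscall (nothing AMD-specific in the code itself; the kernel picks the right table):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
				    int cpu, int group_fd, unsigned long flags)
	{
		/* no glibc wrapper exists for this syscall */
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_HW_CACHE_REFERENCES;
		attr.disabled = 1;
		attr.exclude_kernel = 1;

		fd = perf_event_open(&attr, 0, -1, -1, 0); /* this thread, any CPU */
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... workload under test ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("cache references: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}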
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 098ab77..cb8178a2 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2867,7 +2867,7 @@
 		return ret;
 
 	if (event->attr.precise_ip) {
-		if (!event->attr.freq) {
+		if (!(event->attr.freq || event->attr.wakeup_events)) {
 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
 			if (!(event->attr.sample_type &
 			      ~intel_pmu_free_running_flags(event)))
@@ -3750,11 +3750,11 @@
 		pr_cont("Nehalem events, ");
 		break;
 
-	case INTEL_FAM6_ATOM_PINEVIEW:
-	case INTEL_FAM6_ATOM_LINCROFT:
-	case INTEL_FAM6_ATOM_PENWELL:
-	case INTEL_FAM6_ATOM_CLOVERVIEW:
-	case INTEL_FAM6_ATOM_CEDARVIEW:
+	case INTEL_FAM6_ATOM_BONNELL:
+	case INTEL_FAM6_ATOM_BONNELL_MID:
+	case INTEL_FAM6_ATOM_SALTWELL:
+	case INTEL_FAM6_ATOM_SALTWELL_MID:
+	case INTEL_FAM6_ATOM_SALTWELL_TABLET:
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -3766,9 +3766,11 @@
 		pr_cont("Atom events, ");
 		break;
 
-	case INTEL_FAM6_ATOM_SILVERMONT1:
-	case INTEL_FAM6_ATOM_SILVERMONT2:
+	case INTEL_FAM6_ATOM_SILVERMONT:
+	case INTEL_FAM6_ATOM_SILVERMONT_X:
+	case INTEL_FAM6_ATOM_SILVERMONT_MID:
 	case INTEL_FAM6_ATOM_AIRMONT:
+	case INTEL_FAM6_ATOM_AIRMONT_MID:
 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 			sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -3785,7 +3787,7 @@
 		break;
 
 	case INTEL_FAM6_ATOM_GOLDMONT:
-	case INTEL_FAM6_ATOM_DENVERTON:
+	case INTEL_FAM6_ATOM_GOLDMONT_X:
 		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 47d526c..72d0934 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -531,8 +531,8 @@
 
 	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,     slm_cstates),
 
 	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index be0b196..68144a3 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -61,8 +61,8 @@
 	case INTEL_FAM6_BROADWELL_GT3E:
 	case INTEL_FAM6_BROADWELL_X:
 
-	case INTEL_FAM6_ATOM_SILVERMONT1:
-	case INTEL_FAM6_ATOM_SILVERMONT2:
+	case INTEL_FAM6_ATOM_SILVERMONT:
+	case INTEL_FAM6_ATOM_SILVERMONT_X:
 	case INTEL_FAM6_ATOM_AIRMONT:
 		if (idx == PERF_MSR_SMI)
 			return true;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 1ce6ae3..c42c9d50 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -996,12 +996,12 @@
 	return 0;
 }
 
-static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
+static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
 {
 	return 0;
 }
 
-static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
+static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
 {
 }
 
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 98444b7..06de338 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -271,10 +271,12 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
 #define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF	(13*32+1) /* Instructions Retired Count */
-#define X86_FEATURE_AMD_IBPB	(13*32+12) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_AMD_IBRS	(13*32+14) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_AMD_STIBP	(13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_IBPB	(13*32+12) /* "" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS	(13*32+14) /* "" Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP	(13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_SSBD	(13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD	(13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO	(13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
 #define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
@@ -315,6 +317,7 @@
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
 #define X86_FEATURE_TSX_FORCE_ABORT	(18*32+13) /* "" TSX_FORCE_ABORT */
+#define X86_FEATURE_MD_CLEAR		(18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
@@ -352,5 +355,7 @@
 #define X86_BUG_SPECTRE_V2	X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
 #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
 #define X86_BUG_L1TF		X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+#define X86_BUG_MDS		X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+#define X86_BUG_MSBDS_ONLY	X86_BUG(20) /* CPU is only affected by the MSBDS variant of BUG_MDS */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 75b748a..ba7b6f7 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -50,19 +50,23 @@
 
 /* "Small Core" Processors (Atom) */
 
-#define INTEL_FAM6_ATOM_PINEVIEW	0x1C
-#define INTEL_FAM6_ATOM_LINCROFT	0x26
-#define INTEL_FAM6_ATOM_PENWELL		0x27
-#define INTEL_FAM6_ATOM_CLOVERVIEW	0x35
-#define INTEL_FAM6_ATOM_CEDARVIEW	0x36
-#define INTEL_FAM6_ATOM_SILVERMONT1	0x37 /* BayTrail/BYT / Valleyview */
-#define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
-#define INTEL_FAM6_ATOM_AIRMONT		0x4C /* CherryTrail / Braswell */
-#define INTEL_FAM6_ATOM_MERRIFIELD	0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD	0x5A /* Anniedale */
-#define INTEL_FAM6_ATOM_GOLDMONT	0x5C
-#define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
-#define INTEL_FAM6_ATOM_GEMINI_LAKE	0x7A
+#define INTEL_FAM6_ATOM_BONNELL		0x1C /* Diamondville, Pineview */
+#define INTEL_FAM6_ATOM_BONNELL_MID	0x26 /* Silverthorne, Lincroft */
+
+#define INTEL_FAM6_ATOM_SALTWELL	0x36 /* Cedarview */
+#define INTEL_FAM6_ATOM_SALTWELL_MID	0x27 /* Penwell */
+#define INTEL_FAM6_ATOM_SALTWELL_TABLET	0x35 /* Cloverview */
+
+#define INTEL_FAM6_ATOM_SILVERMONT	0x37 /* Bay Trail, Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT_X	0x4D /* Avaton, Rangely */
+#define INTEL_FAM6_ATOM_SILVERMONT_MID	0x4A /* Merrifield */
+
+#define INTEL_FAM6_ATOM_AIRMONT		0x4C /* Cherry Trail, Braswell */
+#define INTEL_FAM6_ATOM_AIRMONT_MID	0x5A /* Moorefield */
+
+#define INTEL_FAM6_ATOM_GOLDMONT	0x5C /* Apollo Lake */
+#define INTEL_FAM6_ATOM_GOLDMONT_X	0x5F /* Denverton */
+#define INTEL_FAM6_ATOM_GOLDMONT_PLUS	0x7A /* Gemini Lake */
 
 /* Xeon Phi */
 
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 508a062..0c8f428 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -5,6 +5,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/nospec-branch.h>
+
 /* Provide __cpuidle; we can't safely include <linux/cpu.h> */
 #define __cpuidle __attribute__((__section__(".cpuidle.text")))
 
@@ -53,11 +55,13 @@
 
 static inline __cpuidle void native_safe_halt(void)
 {
+	mds_idle_clear_cpu_buffers();
 	asm volatile("sti; hlt": : :"memory");
 }
 
 static inline __cpuidle void native_halt(void)
 {
+	mds_idle_clear_cpu_buffers();
 	asm volatile("hlt": : :"memory");
 }
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9a8167b..83b5b29 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -487,6 +487,7 @@
 	bool tpr_access_reporting;
 	u64 ia32_xss;
 	u64 microcode_version;
+	u64 arch_capabilities;
 
 	/*
 	 * Paging state of the vcpu
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 5e69154..a61ec81 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -52,6 +52,21 @@
 
 #define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
 
+static inline u32 intel_get_microcode_revision(void)
+{
+	u32 rev, dummy;
+
+	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
+
+	/* As documented in the SDM: Do a CPUID 1 here */
+	sync_core();
+
+	/* get the current revision from MSR 0x8B */
+	native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
+
+	return rev;
+}
+
 extern int has_newer_microcode(void *mc, unsigned int csig, int cpf, int rev);
 extern int microcode_sanity_check(void *mc, int print_err);
 extern int find_matching_signature(void *mc, unsigned int csig, int cpf);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9963e21..38f94d0 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_MSR_INDEX_H
 #define _ASM_X86_MSR_INDEX_H
 
+#include <linux/bits.h>
+
 /*
  * CPU model specific register (MSR) numbers.
  *
@@ -38,13 +40,14 @@
 
 /* Intel MSRs. Some also available on other CPUs */
 #define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
-#define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
-#define SPEC_CTRL_STIBP			(1 << 1)   /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_IBRS			BIT(0)	   /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_STIBP_SHIFT		1	   /* Single Thread Indirect Branch Predictor (STIBP) bit */
+#define SPEC_CTRL_STIBP			BIT(SPEC_CTRL_STIBP_SHIFT)	/* STIBP mask */
 #define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
-#define SPEC_CTRL_SSBD			(1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_SSBD			BIT(SPEC_CTRL_SSBD_SHIFT)	/* Speculative Store Bypass Disable */
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
-#define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
 
 #define MSR_IA32_PERFCTR0		0x000000c1
 #define MSR_IA32_PERFCTR1		0x000000c2
@@ -61,20 +64,25 @@
 #define MSR_MTRRcap			0x000000fe
 
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
-#define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
-#define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
-#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	(1 << 3)   /* Skip L1D flush on vmentry */
-#define ARCH_CAP_SSB_NO			(1 << 4)   /*
-						    * Not susceptible to Speculative Store Bypass
-						    * attack, so no Speculative Store Bypass
-						    * control required.
-						    */
+#define ARCH_CAP_RDCL_NO		BIT(0)	/* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL		BIT(1)	/* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	BIT(3)	/* Skip L1D flush on vmentry */
+#define ARCH_CAP_SSB_NO			BIT(4)	/*
+						 * Not susceptible to Speculative Store Bypass
+						 * attack, so no Speculative Store Bypass
+						 * control required.
+						 */
+#define ARCH_CAP_MDS_NO			BIT(5)   /*
+						  * Not susceptible to
+						  * Microarchitectural Data
+						  * Sampling (MDS) vulnerabilities.
+						  */
 
 #define MSR_IA32_FLUSH_CMD		0x0000010b
-#define L1D_FLUSH			(1 << 0)   /*
-						    * Writeback and invalidate the
-						    * L1 data cache.
-						    */
+#define L1D_FLUSH			BIT(0)	/*
+						 * Writeback and invalidate the
+						 * L1 data cache.
+						 */
 
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index f37f2d8..0b40cc4 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 
 #include <asm/cpufeature.h>
+#include <asm/nospec-branch.h>
 
 #define MWAIT_SUBSTATE_MASK		0xf
 #define MWAIT_CSTATE_MASK		0xf
@@ -38,6 +39,8 @@
 
 static inline void __mwait(unsigned long eax, unsigned long ecx)
 {
+	mds_idle_clear_cpu_buffers();
+
 	/* "mwait %eax, %ecx;" */
 	asm volatile(".byte 0x0f, 0x01, 0xc9;"
 		     :: "a" (eax), "c" (ecx));
@@ -72,6 +75,8 @@
 static inline void __mwaitx(unsigned long eax, unsigned long ebx,
 			    unsigned long ecx)
 {
+	/* No MDS buffer clear as this is AMD/HYGON only */
+
 	/* "mwaitx %eax, %ebx, %ecx;" */
 	asm volatile(".byte 0x0f, 0x01, 0xfb;"
 		     :: "a" (eax), "b" (ebx), "c" (ecx));
@@ -79,6 +84,8 @@
 
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
+	mds_idle_clear_cpu_buffers();
+
 	trace_hardirqs_on();
 	/* "mwait %eax, %ecx;" */
 	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 1b41321..031a58e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -3,6 +3,8 @@
 #ifndef _ASM_X86_NOSPEC_BRANCH_H_
 #define _ASM_X86_NOSPEC_BRANCH_H_
 
+#include <linux/static_key.h>
+
 #include <asm/alternative.h>
 #include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
@@ -214,10 +216,17 @@
 	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
 	SPECTRE_V2_RETPOLINE_GENERIC,
 	SPECTRE_V2_RETPOLINE_AMD,
-	SPECTRE_V2_IBRS,
 	SPECTRE_V2_IBRS_ENHANCED,
 };
 
+/* The indirect branch speculation control variants */
+enum spectre_v2_user_mitigation {
+	SPECTRE_V2_USER_NONE,
+	SPECTRE_V2_USER_STRICT,
+	SPECTRE_V2_USER_PRCTL,
+	SPECTRE_V2_USER_SECCOMP,
+};
+
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
 	SPEC_STORE_BYPASS_NONE,
@@ -295,6 +304,60 @@
 	preempt_enable();						\
 } while (0)
 
+DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+DECLARE_STATIC_KEY_FALSE(mds_user_clear);
+DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+
+#include <asm/segment.h>
+
+/**
+ * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * This uses the otherwise unused and obsolete VERW instruction in
+ * combination with microcode which triggers a CPU buffer flush when the
+ * instruction is executed.
+ */
+static inline void mds_clear_cpu_buffers(void)
+{
+	static const u16 ds = __KERNEL_DS;
+
+	/*
+	 * Has to be the memory-operand variant because only that
+	 * guarantees the CPU buffer flush functionality according to
+	 * documentation. The register-operand variant does not.
+	 * Works with any segment selector, but a valid writable
+	 * data segment is the fastest variant.
+	 *
+	 * "cc" clobber is required because VERW modifies ZF.
+	 */
+	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+}
+
+/**
+ * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+static inline void mds_user_clear_cpu_buffers(void)
+{
+	if (static_branch_likely(&mds_user_clear))
+		mds_clear_cpu_buffers();
+}
+
+/**
+ * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+static inline void mds_idle_clear_cpu_buffers(void)
+{
+	if (static_branch_likely(&mds_idle_clear))
+		mds_clear_cpu_buffers();
+}
+
 #endif /* __ASSEMBLY__ */
 
 /*
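The three helpers above split the mitigation into a mechanism (mds_clear_cpu_buffers()) and two static-key-gated policy wrappers, so the VERW costs nothing unless a mitigation mode enabled the key. The idiom itself is not privileged; a hedged userspace illustration (any valid writable selector works, here the current %ds; it only actually flushes buffers on CPUs running MD_CLEAR microcode):

	#include <stdint.h>

	static inline void verw_flush_demo(void)
	{
		uint16_t sel;

		/* grab a known-good writable data segment selector */
		__asm__ volatile("mov %%ds, %0" : "=r"(sel));
		/* memory-operand form is required; "cc" because VERW writes ZF */
		__asm__ volatile("verw %0" : : "m"(sel) : "cc");
	}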
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 221a32e..f12e61e 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -44,15 +44,15 @@
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
 
 
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
+{
+	WRITE_ONCE(*ptep, pte);
+}
+
 static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
 				    pte_t *ptep)
 {
-	*ptep = native_make_pte(0);
-}
-
-static inline void native_set_pte(pte_t *ptep, pte_t pte)
-{
-	*ptep = pte;
+	native_set_pte(ptep, native_make_pte(0));
 }
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -62,7 +62,7 @@
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	*pmdp = pmd;
+	WRITE_ONCE(*pmdp, pmd);
 }
 
 static inline void native_pmd_clear(pmd_t *pmd)
@@ -98,7 +98,7 @@
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-	*pudp = pud;
+	WRITE_ONCE(*pudp, pud);
 }
 
 static inline void native_pud_clear(pud_t *pud)
@@ -131,7 +131,7 @@
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	*pgdp = kaiser_set_shadow_pgd(pgdp, pgd);
+	WRITE_ONCE(*pgdp, kaiser_set_shadow_pgd(pgdp, pgd));
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
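Replacing the plain assignments with WRITE_ONCE() guarantees a single full-width store: the compiler may otherwise tear a page-table write into pieces, which is unsafe once the hardware page walker or a concurrent reader can observe the live entry. Conceptually (a simplified sketch; the kernel's real WRITE_ONCE() in compiler.h handles more cases):

	#include <stdint.h>

	/* volatile access forbids the compiler from splitting or eliding the store */
	#define WRITE_ONCE_DEMO(x, val) \
		(*(volatile __typeof__(x) *)&(x) = (val))

	static void set_entry(uint64_t *entry, uint64_t val)
	{
		WRITE_ONCE_DEMO(*entry, val);	/* one 64-bit store, never torn */
	}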
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ee8c629..155e49f 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -874,4 +874,10 @@
 
 extern enum l1tf_mitigations l1tf_mitigation;
 
+enum mds_mitigations {
+	MDS_MITIGATION_OFF,
+	MDS_MITIGATION_FULL,
+	MDS_MITIGATION_VMWERV,
+};
+
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index ae7c2c5..5393bab 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,12 +53,24 @@
 	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }
 
+static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
+{
+	BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+	return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
 static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
 {
 	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
 	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }
 
+static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+	BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+	return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
 static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
 {
 	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
@@ -70,11 +82,7 @@
 static inline void speculative_store_bypass_ht_init(void) { }
 #endif
 
-extern void speculative_store_bypass_update(unsigned long tif);
-
-static inline void speculative_store_bypass_update_current(void)
-{
-	speculative_store_bypass_update(current_thread_info()->flags);
-}
+extern void speculation_ctrl_update(unsigned long tif);
+extern void speculation_ctrl_update_current(void);
 
 #endif
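The two new STIBP helpers mirror the existing SSBD pair: they move a single bit between bit 9 of the thread flags (TIF_SPEC_IB) and bit 1 of SPEC_CTRL (STIBP), i.e. a shift by 8 in either direction. A standalone check, with the constants copied from the headers in this series:

	#include <assert.h>

	#define SPEC_CTRL_STIBP_SHIFT	1
	#define SPEC_CTRL_STIBP		(1UL << SPEC_CTRL_STIBP_SHIFT)
	#define TIF_SPEC_IB		9
	#define _TIF_SPEC_IB		(1UL << TIF_SPEC_IB)

	int main(void)
	{
		/* thread flag bit 9 -> SPEC_CTRL bit 1 */
		assert((_TIF_SPEC_IB >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT))
		       == SPEC_CTRL_STIBP);
		/* and back */
		assert((SPEC_CTRL_STIBP << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT))
		       == _TIF_SPEC_IB);
		return 0;
	}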
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 37f2e0b..4141ead 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -55,13 +55,16 @@
 static inline unsigned long *
 get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
 {
+	struct inactive_task_frame *frame;
+
 	if (regs)
 		return (unsigned long *)regs->bp;
 
 	if (task == current)
 		return __builtin_frame_address(0);
 
-	return (unsigned long *)((struct inactive_task_frame *)task->thread.sp)->bp;
+	frame = (struct inactive_task_frame *)task->thread.sp;
+	return (unsigned long *)READ_ONCE_NOCHECK(frame->bp);
 }
 #else
 static inline unsigned long *
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 8e9dbe7..5cc2ce4 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -11,7 +11,13 @@
 
 /* image of the saved processor state */
 struct saved_context {
-	u16 es, fs, gs, ss;
+	/*
+	 * On x86_32, all segment registers, with the possible exception of
+	 * gs, are saved at kernel entry in pt_regs.
+	 */
+#ifdef CONFIG_X86_32_LAZY_GS
+	u16 gs;
+#endif
 	unsigned long cr0, cr2, cr3, cr4;
 	u64 misc_enable;
 	bool misc_enable_saved;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 2bd96b4..7017519 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -19,8 +19,20 @@
  */
 struct saved_context {
 	struct pt_regs regs;
-	u16 ds, es, fs, gs, ss;
-	unsigned long gs_base, gs_kernel_base, fs_base;
+
+	/*
+	 * User CS and SS are saved in current_pt_regs().  The rest of the
+	 * segment selectors need to be saved and restored here.
+	 */
+	u16 ds, es, fs, gs;
+
+	/*
+	 * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
+	 * so we save them separately.  We save the kernelmode GSBASE to
+	 * restore percpu access after resume.
+	 */
+	unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
+
 	unsigned long cr0, cr2, cr3, cr4, cr8;
 	u64 misc_enable;
 	bool misc_enable_saved;
@@ -29,8 +41,7 @@
 	u16 gdt_pad; /* Unused */
 	struct desc_ptr gdt_desc;
 	u16 idt_pad;
-	u16 idt_limit;
-	unsigned long idt_base;
+	struct desc_ptr idt;
 	u16 ldt;
 	u16 tss;
 	unsigned long tr;
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 5cb436a..e959b8d 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -8,9 +8,6 @@
 
 __visible struct task_struct *__switch_to(struct task_struct *prev,
 					  struct task_struct *next);
-struct tss_struct;
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss);
 
 /* This runs on the previous thread's stack. */
 static inline void prepare_switch_to(struct task_struct *prev,
@@ -38,6 +35,7 @@
 
 /* data that is pointed to by thread.sp */
 struct inactive_task_frame {
+	unsigned long flags;
 #ifdef CONFIG_X86_64
 	unsigned long r15;
 	unsigned long r14;
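The new flags member pairs with the pushfl/pushfq added to the switch paths above: flags is pushed last, so it lands at the lowest address, which is exactly where thread.sp points, hence it must be the first member. Saving EFLAGS here matters because bits like AC (the SMAP override toggled by stac/clac) must not leak from one task into another. The resulting 64-bit frame, as a sketch:

	/* stack grows down; thread.sp points at the most recent push */
	struct inactive_task_frame_demo {
		unsigned long flags;	/* pushfq - lowest address */
		unsigned long r15;
		unsigned long r14;
		unsigned long r13;
		unsigned long r12;
		unsigned long bx;
		unsigned long bp;
		unsigned long ret_addr;	/* pushed by the call into __switch_to_asm */
	};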
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 83252c2..34191cf 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -83,10 +83,12 @@
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
-#define TIF_SSBD		5	/* Reduced data speculation */
+#define TIF_SSBD		5	/* Speculative store bypass disable */
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
+#define TIF_SPEC_IB		9	/* Indirect branch speculation mitigation */
+#define TIF_SPEC_FORCE_UPDATE	10	/* Force speculation MSR update in context switch */
 #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
 #define TIF_UPROBE		12	/* breakpointed or singlestepping */
 #define TIF_NOTSC		16	/* TSC is not accessible in userland */
@@ -112,6 +114,8 @@
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_SPEC_IB		(1 << TIF_SPEC_IB)
+#define _TIF_SPEC_FORCE_UPDATE	(1 << TIF_SPEC_FORCE_UPDATE)
 #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_NOTSC		(1 << TIF_NOTSC)
@@ -142,8 +146,18 @@
 	_TIF_NOHZ | _TIF_FSCHECK)
 
 /* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW							\
-	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+#define _TIF_WORK_CTXSW_BASE						\
+	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|			\
+	 _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
+
+/*
+ * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
+ */
+#ifdef CONFIG_SMP
+# define _TIF_WORK_CTXSW	(_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
+#else
+# define _TIF_WORK_CTXSW	(_TIF_WORK_CTXSW_BASE)
+#endif
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 686a58d..f5ca156 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -68,8 +68,12 @@
 struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
-	/* last user mm's ctx id */
-	u64 last_ctx_id;
+
+	/* Last user mm for optimizing IBPB */
+	union {
+		struct mm_struct	*last_user_mm;
+		unsigned long		last_user_mm_ibpb;
+	};
 
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 1fdcb8c..79183bc 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -215,6 +215,9 @@
 	__HYPERCALL_DECLS;
 	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
+	if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
+		return -EINVAL;
+
 	stac();
 	asm volatile(CALL_NOSPEC
 		     : __HYPERCALL_5PARAM
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index 3dec769..1c532b3 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -27,7 +27,6 @@
 header-y += mce.h
 header-y += mman.h
 header-y += msgbuf.h
-header-y += msr-index.h
 header-y += msr.h
 header-y += mtrr.h
 header-y += param.h
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 69a6e07..db7dae5 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -28,6 +28,8 @@
 	__u64 mcgcap;	/* MCGCAP MSR: machine check capabilities of CPU */
 	__u64 synd;	/* MCA_SYND MSR: only valid on SMCA systems */
 	__u64 ipid;	/* MCA_IPID MSR: only valid on SMCA systems */
+	__u64 ppin;	/* Protected Processor Inventory Number */
+	__u32 microcode;/* Microcode revision */
 };
 
 #define MCE_GET_RECORD_LEN   _IOR('M', 1, int)
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 169963f..50b8ed0 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -109,6 +109,15 @@
 	movq	pt_regs_r14(%rax), %r14
 	movq	pt_regs_r15(%rax), %r15
 
+#ifdef CONFIG_KASAN
+	/*
+	 * The suspend path may have poisoned some areas deeper in the stack,
+	 * which we now need to unpoison.
+	 */
+	movq	%rsp, %rdi
+	call	kasan_unpoison_task_stack_below
+#endif
+
 	xorl	%eax, %eax
 	addq	$8, %rsp
 	FRAME_END
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 03b6e5c..f7430d8 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -287,7 +287,7 @@
 	tgt_rip  = next_rip + o_dspl;
 	n_dspl = tgt_rip - orig_insn;
 
-	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);
+	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
 
 	if (tgt_rip - orig_insn >= 0) {
 		if (n_dspl - 2 <= 127)
@@ -341,7 +341,7 @@
 	sync_core();
 	local_irq_restore(flags);
 
-	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
+	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
 		   instr, a->instrlen - a->padlen, a->padlen);
 }
 
@@ -359,7 +359,7 @@
 	u8 *instr, *replacement;
 	u8 insnbuf[MAX_PATCH_LEN];
 
-	DPRINTK("alt table %p -> %p", start, end);
+	DPRINTK("alt table %px, -> %px", start, end);
 	/*
 	 * The scan order should be from start to end. A later scanned
 	 * alternative code can overwrite previously scanned alternative code.
@@ -383,14 +383,14 @@
 			continue;
 		}
 
-		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
+		DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d",
 			a->cpuid >> 5,
 			a->cpuid & 0x1f,
 			instr, a->instrlen,
 			replacement, a->replacementlen, a->padlen);
 
-		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
-		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
+		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
+		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
 
 		memcpy(insnbuf, replacement, a->replacementlen);
 		insnbuf_sz = a->replacementlen;
@@ -411,7 +411,7 @@
 				 a->instrlen - a->replacementlen);
 			insnbuf_sz += a->instrlen - a->replacementlen;
 		}
-		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
+		DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);
 
 		text_poke_early(instr, insnbuf, insnbuf_sz);
 	}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6221166..16970c3 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/nospec.h>
 #include <linux/prctl.h>
+#include <linux/sched/smt.h>
 
 #include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
@@ -24,6 +25,7 @@
 #include <asm/vmx.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
+#include <asm/hypervisor.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/intel-family.h>
@@ -32,13 +34,12 @@
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
+static void __init mds_select_mitigation(void);
 
-/*
- * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
- * writes to SPEC_CTRL contain whatever reserved bits have been set.
- */
-u64 __ro_after_init x86_spec_ctrl_base;
+/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+u64 x86_spec_ctrl_base;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /*
  * The vendor and possibly platform specific bits which can be modified in
@@ -53,6 +54,20 @@
 u64 __ro_after_init x86_amd_ls_cfg_base;
 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
+/* Control conditional STIBP in switch_to() */
+DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+/* Control conditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+/* Control unconditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+/* Control MDS CPU buffer clear before returning to user space */
+DEFINE_STATIC_KEY_FALSE(mds_user_clear);
+EXPORT_SYMBOL_GPL(mds_user_clear);
+/* Control MDS CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+EXPORT_SYMBOL_GPL(mds_idle_clear);
+
 void __init check_bugs(void)
 {
 	identify_boot_cpu();
@@ -91,6 +106,10 @@
 
 	l1tf_select_mitigation();
 
+	mds_select_mitigation();
+
+	arch_smt_update();
+
 #ifdef CONFIG_X86_32
 	/*
 	 * Check whether we are able to run this kernel safely on SMP.
@@ -123,31 +142,6 @@
 #endif
 }
 
-/* The kernel command line selection */
-enum spectre_v2_mitigation_cmd {
-	SPECTRE_V2_CMD_NONE,
-	SPECTRE_V2_CMD_AUTO,
-	SPECTRE_V2_CMD_FORCE,
-	SPECTRE_V2_CMD_RETPOLINE,
-	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-	SPECTRE_V2_CMD_RETPOLINE_AMD,
-};
-
-static const char *spectre_v2_strings[] = {
-	[SPECTRE_V2_NONE]			= "Vulnerable",
-	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
-	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
-	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
-	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
-	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
-};
-
-#undef pr_fmt
-#define pr_fmt(fmt)     "Spectre V2 : " fmt
-
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
-	SPECTRE_V2_NONE;
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
@@ -165,9 +159,14 @@
 		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
 
 		/* SSBD controlled in MSR_SPEC_CTRL */
-		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+		    static_cpu_has(X86_FEATURE_AMD_SSBD))
 			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
+		/* Conditional STIBP enabled? */
+		if (static_branch_unlikely(&switch_to_cond_stibp))
+			hostval |= stibp_tif_to_spec_ctrl(ti->flags);
+
 		if (hostval != guestval) {
 			msrval = setguest ? guestval : hostval;
 			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -201,7 +200,7 @@
 		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
 				 ssbd_spec_ctrl_to_tif(hostval);
 
-		speculative_store_bypass_update(tif);
+		speculation_ctrl_update(tif);
 	}
 }
 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
@@ -216,6 +215,70 @@
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"MDS: " fmt
+
+/* Default mitigation for MDS-affected CPUs */
+static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
+static bool mds_nosmt __ro_after_init = false;
+
+static const char * const mds_strings[] = {
+	[MDS_MITIGATION_OFF]	= "Vulnerable",
+	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
+	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
+};
+
+static void __init mds_select_mitigation(void)
+{
+	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
+		mds_mitigation = MDS_MITIGATION_OFF;
+		return;
+	}
+
+	if (mds_mitigation == MDS_MITIGATION_FULL) {
+		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+			mds_mitigation = MDS_MITIGATION_VMWERV;
+
+		static_branch_enable(&mds_user_clear);
+
+		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
+		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
+			cpu_smt_disable(false);
+	}
+
+	pr_info("%s\n", mds_strings[mds_mitigation]);
+}
+
+static int __init mds_cmdline(char *str)
+{
+	if (!boot_cpu_has_bug(X86_BUG_MDS))
+		return 0;
+
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off"))
+		mds_mitigation = MDS_MITIGATION_OFF;
+	else if (!strcmp(str, "full"))
+		mds_mitigation = MDS_MITIGATION_FULL;
+	else if (!strcmp(str, "full,nosmt")) {
+		mds_mitigation = MDS_MITIGATION_FULL;
+		mds_nosmt = true;
+	}
+
+	return 0;
+}
+early_param("mds", mds_cmdline);
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 : " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+	SPECTRE_V2_NONE;
+
+static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+	SPECTRE_V2_USER_NONE;
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
 
@@ -237,15 +300,193 @@
 static inline const char *spectre_v2_module_string(void) { return ""; }
 #endif
 
-static void __init spec2_print_if_insecure(const char *reason)
+static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
-	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-		pr_info("%s selected on command line.\n", reason);
+	int len = strlen(opt);
+
+	return len == arglen && !strncmp(arg, opt, len);
 }
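
Editor's aside (illustrative, not part of the patch): match_option()
requires an exact-length match because cmdline_find_option() returns the
argument length; a bare strncmp() would let a short option swallow a
longer one.

/*
 * arg = "prctl,ibpb", arglen = 10:
 *   match_option(arg, 10, "prctl")      -> false (strlen("prctl") != 10)
 *   match_option(arg, 10, "prctl,ibpb") -> true
 */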
 
-static void __init spec2_print_if_secure(const char *reason)
+/* The kernel command line selection for spectre v2 */
+enum spectre_v2_mitigation_cmd {
+	SPECTRE_V2_CMD_NONE,
+	SPECTRE_V2_CMD_AUTO,
+	SPECTRE_V2_CMD_FORCE,
+	SPECTRE_V2_CMD_RETPOLINE,
+	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+	SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+enum spectre_v2_user_cmd {
+	SPECTRE_V2_USER_CMD_NONE,
+	SPECTRE_V2_USER_CMD_AUTO,
+	SPECTRE_V2_USER_CMD_FORCE,
+	SPECTRE_V2_USER_CMD_PRCTL,
+	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
+	SPECTRE_V2_USER_CMD_SECCOMP,
+	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
+};
+
+static const char * const spectre_v2_user_strings[] = {
+	[SPECTRE_V2_USER_NONE]		= "User space: Vulnerable",
+	[SPECTRE_V2_USER_STRICT]	= "User space: Mitigation: STIBP protection",
+	[SPECTRE_V2_USER_PRCTL]		= "User space: Mitigation: STIBP via prctl",
+	[SPECTRE_V2_USER_SECCOMP]	= "User space: Mitigation: STIBP via seccomp and prctl",
+};
+
+static const struct {
+	const char			*option;
+	enum spectre_v2_user_cmd	cmd;
+	bool				secure;
+} v2_user_options[] __initconst = {
+	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
+	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
+	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
+	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
+	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
+	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
+	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
+};
+
+static void __init spec_v2_user_print_cond(const char *reason, bool secure)
 {
-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
+}
+
+static enum spectre_v2_user_cmd __init
+spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
+{
+	char arg[20];
+	int ret, i;
+
+	switch (v2_cmd) {
+	case SPECTRE_V2_CMD_NONE:
+		return SPECTRE_V2_USER_CMD_NONE;
+	case SPECTRE_V2_CMD_FORCE:
+		return SPECTRE_V2_USER_CMD_FORCE;
+	default:
+		break;
+	}
+
+	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
+				  arg, sizeof(arg));
+	if (ret < 0)
+		return SPECTRE_V2_USER_CMD_AUTO;
+
+	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
+		if (match_option(arg, ret, v2_user_options[i].option)) {
+			spec_v2_user_print_cond(v2_user_options[i].option,
+						v2_user_options[i].secure);
+			return v2_user_options[i].cmd;
+		}
+	}
+
+	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
+	return SPECTRE_V2_USER_CMD_AUTO;
+}
+
+static void __init
+spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+{
+	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
+	bool smt_possible = IS_ENABLED(CONFIG_SMP);
+	enum spectre_v2_user_cmd cmd;
+
+	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
+		return;
+
+	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+		smt_possible = false;
+
+	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+	switch (cmd) {
+	case SPECTRE_V2_USER_CMD_NONE:
+		goto set_mode;
+	case SPECTRE_V2_USER_CMD_FORCE:
+		mode = SPECTRE_V2_USER_STRICT;
+		break;
+	case SPECTRE_V2_USER_CMD_PRCTL:
+	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+		mode = SPECTRE_V2_USER_PRCTL;
+		break;
+	case SPECTRE_V2_USER_CMD_AUTO:
+	case SPECTRE_V2_USER_CMD_SECCOMP:
+	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+		if (IS_ENABLED(CONFIG_SECCOMP))
+			mode = SPECTRE_V2_USER_SECCOMP;
+		else
+			mode = SPECTRE_V2_USER_PRCTL;
+		break;
+	}
+
+	/* Initialize Indirect Branch Prediction Barrier */
+	if (boot_cpu_has(X86_FEATURE_IBPB)) {
+		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+
+		switch (cmd) {
+		case SPECTRE_V2_USER_CMD_FORCE:
+		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+			static_branch_enable(&switch_mm_always_ibpb);
+			break;
+		case SPECTRE_V2_USER_CMD_PRCTL:
+		case SPECTRE_V2_USER_CMD_AUTO:
+		case SPECTRE_V2_USER_CMD_SECCOMP:
+			static_branch_enable(&switch_mm_cond_ibpb);
+			break;
+		default:
+			break;
+		}
+
+		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+			static_key_enabled(&switch_mm_always_ibpb) ?
+			"always-on" : "conditional");
+	}
+
+	/* If enhanced IBRS is enabled, no STIBP is required */
+	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+		return;
+
+	/*
+	 * If SMT is not possible or STIBP is not available, clear the
+	 * STIBP mode.
+	 */
+	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+		mode = SPECTRE_V2_USER_NONE;
+set_mode:
+	spectre_v2_user = mode;
+	/* Only print the STIBP mode when SMT is possible */
+	if (smt_possible)
+		pr_info("%s\n", spectre_v2_user_strings[mode]);
+}
+
+static const char * const spectre_v2_strings[] = {
+	[SPECTRE_V2_NONE]			= "Vulnerable",
+	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
+	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
+	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
+};
+
+static const struct {
+	const char *option;
+	enum spectre_v2_mitigation_cmd cmd;
+	bool secure;
+} mitigation_options[] __initconst = {
+	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
+	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
+	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
+	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
+	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
+};
+
+static void __init spec_v2_print_cond(const char *reason, bool secure)
+{
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
 		pr_info("%s selected on command line.\n", reason);
 }
 
@@ -254,50 +495,30 @@
 	return __is_defined(RETPOLINE);
 }
 
-static inline bool match_option(const char *arg, int arglen, const char *opt)
-{
-	int len = strlen(opt);
-
-	return len == arglen && !strncmp(arg, opt, len);
-}
-
-static const struct {
-	const char *option;
-	enum spectre_v2_mitigation_cmd cmd;
-	bool secure;
-} mitigation_options[] = {
-	{ "off",               SPECTRE_V2_CMD_NONE,              false },
-	{ "on",                SPECTRE_V2_CMD_FORCE,             true },
-	{ "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
-	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
-	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
-	{ "auto",              SPECTRE_V2_CMD_AUTO,              false },
-};
-
 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 {
+	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
 	char arg[20];
 	int ret, i;
-	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
 
-	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
+	    cpu_mitigations_off())
 		return SPECTRE_V2_CMD_NONE;
-	else {
-		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
-		if (ret < 0)
-			return SPECTRE_V2_CMD_AUTO;
 
-		for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
-			if (!match_option(arg, ret, mitigation_options[i].option))
-				continue;
-			cmd = mitigation_options[i].cmd;
-			break;
-		}
+	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
+	if (ret < 0)
+		return SPECTRE_V2_CMD_AUTO;
 
-		if (i >= ARRAY_SIZE(mitigation_options)) {
-			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
-			return SPECTRE_V2_CMD_AUTO;
-		}
+	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
+		if (!match_option(arg, ret, mitigation_options[i].option))
+			continue;
+		cmd = mitigation_options[i].cmd;
+		break;
+	}
+
+	if (i >= ARRAY_SIZE(mitigation_options)) {
+		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+		return SPECTRE_V2_CMD_AUTO;
 	}
 
 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
@@ -314,11 +535,8 @@
 		return SPECTRE_V2_CMD_AUTO;
 	}
 
-	if (mitigation_options[i].secure)
-		spec2_print_if_secure(mitigation_options[i].option);
-	else
-		spec2_print_if_insecure(mitigation_options[i].option);
-
+	spec_v2_print_cond(mitigation_options[i].option,
+			   mitigation_options[i].secure);
 	return cmd;
 }
 
@@ -400,12 +618,6 @@
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
-	/* Initialize Indirect Branch Prediction Barrier if supported */
-	if (boot_cpu_has(X86_FEATURE_IBPB)) {
-		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
-		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
-	}
-
 	/*
 	 * Retpoline means the kernel is safe because it has no indirect
 	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
@@ -421,6 +633,99 @@
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
+
+	/* Set up IBPB and STIBP depending on the general spectre V2 command */
+	spectre_v2_user_select_mitigation(cmd);
+}
+
+static void update_stibp_msr(void * __unused)
+{
+	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+/* Update x86_spec_ctrl_base in case SMT state changed. */
+static void update_stibp_strict(void)
+{
+	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+
+	if (sched_smt_active())
+		mask |= SPEC_CTRL_STIBP;
+
+	if (mask == x86_spec_ctrl_base)
+		return;
+
+	pr_info("Update user space SMT mitigation: STIBP %s\n",
+		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
+	x86_spec_ctrl_base = mask;
+	on_each_cpu(update_stibp_msr, NULL, 1);
+}
+
+/* Update the static key controlling the evaluation of TIF_SPEC_IB */
+static void update_indir_branch_cond(void)
+{
+	if (sched_smt_active())
+		static_branch_enable(&switch_to_cond_stibp);
+	else
+		static_branch_disable(&switch_to_cond_stibp);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+/* Update the static key controlling the MDS CPU buffer clear in idle */
+static void update_mds_branch_idle(void)
+{
+	/*
+	 * Enable the idle clearing if SMT is active on CPUs which are
+	 * affected only by MSBDS and not any other MDS variant.
+	 *
+	 * The other variants cannot be mitigated when SMT is enabled, so
+	 * clearing the buffers on idle just to prevent the Store Buffer
+	 * repartitioning leak would be a window dressing exercise.
+	 */
+	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
+		return;
+
+	if (sched_smt_active())
+		static_branch_enable(&mds_idle_clear);
+	else
+		static_branch_disable(&mds_idle_clear);
+}
+
+#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+
+void arch_smt_update(void)
+{
+	/* Enhanced IBRS implies STIBP. No update required. */
+	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+		return;
+
+	mutex_lock(&spec_ctrl_mutex);
+
+	switch (spectre_v2_user) {
+	case SPECTRE_V2_USER_NONE:
+		break;
+	case SPECTRE_V2_USER_STRICT:
+		update_stibp_strict();
+		break;
+	case SPECTRE_V2_USER_PRCTL:
+	case SPECTRE_V2_USER_SECCOMP:
+		update_indir_branch_cond();
+		break;
+	}
+
+	switch (mds_mitigation) {
+	case MDS_MITIGATION_FULL:
+	case MDS_MITIGATION_VMWERV:
+		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
+			pr_warn_once(MDS_MSG_SMT);
+		update_mds_branch_idle();
+		break;
+	case MDS_MITIGATION_OFF:
+		break;
+	}
+
+	mutex_unlock(&spec_ctrl_mutex);
 }
 
 #undef pr_fmt
@@ -437,7 +742,7 @@
 	SPEC_STORE_BYPASS_CMD_SECCOMP,
 };
 
-static const char *ssb_strings[] = {
+static const char * const ssb_strings[] = {
 	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
 	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
 	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
@@ -447,7 +752,7 @@
 static const struct {
 	const char *option;
 	enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[] = {
+} ssb_mitigation_options[] __initconst = {
 	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
 	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
 	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
@@ -461,7 +766,8 @@
 	char arg[20];
 	int ret, i;
 
-	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
+	    cpu_mitigations_off()) {
 		return SPEC_STORE_BYPASS_CMD_NONE;
 	} else {
 		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
@@ -531,18 +837,16 @@
 	if (mode == SPEC_STORE_BYPASS_DISABLE) {
 		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
 		/*
-		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
-		 * a completely different MSR and bit dependent on family.
+		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
+		 * use a completely different MSR and bit dependent on family.
 		 */
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
+		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+			x86_amd_ssb_disable();
+		} else {
 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
 			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
 			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-			break;
-		case X86_VENDOR_AMD:
-			x86_amd_ssb_disable();
-			break;
 		}
 	}
 
@@ -560,10 +864,25 @@
 #undef pr_fmt
 #define pr_fmt(fmt)     "Speculation prctl: " fmt
 
+static void task_update_spec_tif(struct task_struct *tsk)
+{
+	/* Force the update of the real TIF bits */
+	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
+
+	/*
+	 * Immediately update the speculation control MSRs for the current
+	 * task, but for a non-current task delay setting the CPU
+	 * mitigation until it is scheduled next.
+	 *
+	 * This can only happen for SECCOMP mitigation. For PRCTL it's
+	 * always the current task.
+	 */
+	if (tsk == current)
+		speculation_ctrl_update_current();
+}
+
 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
-	bool update;
-
 	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
 	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
 		return -ENXIO;
@@ -574,28 +893,56 @@
 		if (task_spec_ssb_force_disable(task))
 			return -EPERM;
 		task_clear_spec_ssb_disable(task);
-		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+		task_update_spec_tif(task);
 		break;
 	case PR_SPEC_DISABLE:
 		task_set_spec_ssb_disable(task);
-		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+		task_update_spec_tif(task);
 		break;
 	case PR_SPEC_FORCE_DISABLE:
 		task_set_spec_ssb_disable(task);
 		task_set_spec_ssb_force_disable(task);
-		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+		task_update_spec_tif(task);
 		break;
 	default:
 		return -ERANGE;
 	}
+	return 0;
+}
 
-	/*
-	 * If being set on non-current task, delay setting the CPU
-	 * mitigation until it is next scheduled.
-	 */
-	if (task == current && update)
-		speculative_store_bypass_update_current();
-
+static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+	switch (ctrl) {
+	case PR_SPEC_ENABLE:
+		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+			return 0;
+		/*
+		 * Indirect branch speculation is always disabled in strict
+		 * mode.
+		 */
+		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+			return -EPERM;
+		task_clear_spec_ib_disable(task);
+		task_update_spec_tif(task);
+		break;
+	case PR_SPEC_DISABLE:
+	case PR_SPEC_FORCE_DISABLE:
+		/*
+		 * Indirect branch speculation is always allowed when
+		 * mitigation is force disabled.
+		 */
+		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+			return -EPERM;
+		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+			return 0;
+		task_set_spec_ib_disable(task);
+		if (ctrl == PR_SPEC_FORCE_DISABLE)
+			task_set_spec_ib_force_disable(task);
+		task_update_spec_tif(task);
+		break;
+	default:
+		return -ERANGE;
+	}
 	return 0;
 }
 
@@ -605,6 +952,8 @@
 	switch (which) {
 	case PR_SPEC_STORE_BYPASS:
 		return ssb_prctl_set(task, ctrl);
+	case PR_SPEC_INDIRECT_BRANCH:
+		return ib_prctl_set(task, ctrl);
 	default:
 		return -ENODEV;
 	}
@@ -615,6 +964,8 @@
 {
 	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
 		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
 }
 #endif
 
@@ -637,11 +988,35 @@
 	}
 }
 
+static int ib_prctl_get(struct task_struct *task)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		return PR_SPEC_NOT_AFFECTED;
+
+	switch (spectre_v2_user) {
+	case SPECTRE_V2_USER_NONE:
+		return PR_SPEC_ENABLE;
+	case SPECTRE_V2_USER_PRCTL:
+	case SPECTRE_V2_USER_SECCOMP:
+		if (task_spec_ib_force_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+		if (task_spec_ib_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+	case SPECTRE_V2_USER_STRICT:
+		return PR_SPEC_DISABLE;
+	default:
+		return PR_SPEC_NOT_AFFECTED;
+	}
+}
+
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 {
 	switch (which) {
 	case PR_SPEC_STORE_BYPASS:
 		return ssb_prctl_get(task);
+	case PR_SPEC_INDIRECT_BRANCH:
+		return ib_prctl_get(task);
 	default:
 		return -ENODEV;
 	}
@@ -713,6 +1088,11 @@
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return;
 
+	if (cpu_mitigations_off())
+		l1tf_mitigation = L1TF_MITIGATION_OFF;
+	else if (cpu_mitigations_auto_nosmt())
+		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+
 	override_cache_bits(&boot_cpu_data);
 
 	switch (l1tf_mitigation) {
@@ -735,12 +1115,13 @@
 #endif
 
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
-	if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
+	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
+			e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
 		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
 				half_pa);
 		pr_info("However, doing so will make a part of your RAM unusable.\n");
-		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
+		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
 		return;
 	}
 
@@ -773,13 +1154,14 @@
 early_param("l1tf", l1tf_cmdline);
 
 #undef pr_fmt
+#define pr_fmt(fmt) fmt
 
 #ifdef CONFIG_SYSFS
 
 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
-static const char *l1tf_vmx_states[] = {
+static const char * const l1tf_vmx_states[] = {
 	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
 	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
 	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
@@ -795,13 +1177,14 @@
 
 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
 	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
-	     cpu_smt_control == CPU_SMT_ENABLED))
+	     sched_smt_active())) {
 		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
 			       l1tf_vmx_states[l1tf_vmx_mitigation]);
+	}
 
 	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
 		       l1tf_vmx_states[l1tf_vmx_mitigation],
-		       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+		       sched_smt_active() ? "vulnerable" : "disabled");
 }
 #else
 static ssize_t l1tf_show_state(char *buf)
@@ -810,6 +1193,55 @@
 }
 #endif
 
+static ssize_t mds_show_state(char *buf)
+{
+#ifdef CONFIG_HYPERVISOR_GUEST
+	if (x86_hyper) {
+		return sprintf(buf, "%s; SMT Host state unknown\n",
+			       mds_strings[mds_mitigation]);
+	}
+#endif
+
+	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
+			        sched_smt_active() ? "mitigated" : "disabled"));
+	}
+
+	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+		       sched_smt_active() ? "vulnerable" : "disabled");
+}
+
+static char *stibp_state(void)
+{
+	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+		return "";
+
+	switch (spectre_v2_user) {
+	case SPECTRE_V2_USER_NONE:
+		return ", STIBP: disabled";
+	case SPECTRE_V2_USER_STRICT:
+		return ", STIBP: forced";
+	case SPECTRE_V2_USER_PRCTL:
+	case SPECTRE_V2_USER_SECCOMP:
+		if (static_key_enabled(&switch_to_cond_stibp))
+			return ", STIBP: conditional";
+	}
+	return "";
+}
+
+static char *ibpb_state(void)
+{
+	if (boot_cpu_has(X86_FEATURE_IBPB)) {
+		if (static_key_enabled(&switch_mm_always_ibpb))
+			return ", IBPB: always-on";
+		if (static_key_enabled(&switch_mm_cond_ibpb))
+			return ", IBPB: conditional";
+		return ", IBPB: disabled";
+	}
+	return "";
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -827,9 +1259,11 @@
 		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
 
 	case X86_BUG_SPECTRE_V2:
-		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+			       ibpb_state(),
 			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+			       stibp_state(),
+			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
 			       spectre_v2_module_string());
 
 	case X86_BUG_SPEC_STORE_BYPASS:
@@ -839,6 +1273,10 @@
 		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
 			return l1tf_show_state(buf);
 		break;
+
+	case X86_BUG_MDS:
+		return mds_show_state(buf);
+
 	default:
 		break;
 	}
@@ -870,4 +1308,9 @@
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
 }
+
+ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
+}
 #endif
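
Editor's aside (illustrative, not part of the patch): cpu_show_mds() above
backs the new sysfs vulnerabilities file; a minimal reader might look like
this.

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mds", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Prints e.g. "Mitigation: Clear CPU buffers; SMT vulnerable" */
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}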
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3c01610..cda130d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -752,6 +752,12 @@
 		set_cpu_cap(c, X86_FEATURE_STIBP);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
+
+	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
+	}
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -885,84 +891,95 @@
 	c->x86_cache_bits = c->x86_phys_bits;
 }
 
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CEDARVIEW,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CLOVERVIEW,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_LINCROFT,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PENWELL,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PINEVIEW,	X86_FEATURE_ANY },
-	{ X86_VENDOR_CENTAUR,	5 },
-	{ X86_VENDOR_INTEL,	5 },
-	{ X86_VENDOR_NSC,	5 },
-	{ X86_VENDOR_ANY,	4 },
+#define NO_SPECULATION	BIT(0)
+#define NO_MELTDOWN	BIT(1)
+#define NO_SSB		BIT(2)
+#define NO_L1TF		BIT(3)
+#define NO_MDS		BIT(4)
+#define MSBDS_ONLY	BIT(5)
+
+#define VULNWL(_vendor, _family, _model, _whitelist)	\
+	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+
+#define VULNWL_INTEL(model, whitelist)		\
+	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+
+#define VULNWL_AMD(family, whitelist)		\
+	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
+
+	/* Intel Family 6 */
+	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION),
+	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION),
+
+	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(ATOM_SILVERMONT_X,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+
+	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
+
+	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY),
+
+	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF),
+	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_MDS | NO_L1TF),
+	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF),
+
+	/* AMD Family 0xf - 0x12 */
+	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+
+	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS),
 	{}
 };
 
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
-	{ X86_VENDOR_AMD },
-	{}
-};
+static bool __init cpu_matches(unsigned long which)
+{
+	const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
 
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PINEVIEW	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_LINCROFT	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PENWELL		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CLOVERVIEW	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CEDARVIEW	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
-	{ X86_VENDOR_CENTAUR,	5,					},
-	{ X86_VENDOR_INTEL,	5,					},
-	{ X86_VENDOR_NSC,	5,					},
-	{ X86_VENDOR_AMD,	0x12,					},
-	{ X86_VENDOR_AMD,	0x11,					},
-	{ X86_VENDOR_AMD,	0x10,					},
-	{ X86_VENDOR_AMD,	0xf,					},
-	{ X86_VENDOR_ANY,	4,					},
-	{}
-};
-
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
-	/* in addition to cpu_no_speculation */
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MOOREFIELD	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_DENVERTON	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GEMINI_LAKE	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
-	{}
-};
+	return m && !!(m->driver_data & which);
+}
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = 0;
 
-	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
-	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	   !(ia32_cap & ARCH_CAP_SSB_NO))
-		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
-
-	if (x86_match_cpu(cpu_no_speculation))
+	if (cpu_matches(NO_SPECULATION))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
+	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+	if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
 	if (ia32_cap & ARCH_CAP_IBRS_ALL)
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 
-	if (x86_match_cpu(cpu_no_meltdown))
+	if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+		setup_force_cpu_bug(X86_BUG_MDS);
+		if (cpu_matches(MSBDS_ONLY))
+			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+	}
+
+	if (cpu_matches(NO_MELTDOWN))
 		return;
 
 	/* Rogue Data Cache Load? No! */
@@ -971,7 +988,7 @@
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 
-	if (x86_match_cpu(cpu_no_l1tf))
+	if (cpu_matches(NO_L1TF))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_L1TF);
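
Editor's aside (illustrative, not part of the patch): how the whitelist
flags combine, worked through for one entry of the table above, assuming
no ARCH_CAP_*_NO bits are set in IA32_ARCH_CAPABILITIES.

/*
 * ATOM_SILVERMONT carries NO_SSB | NO_L1TF | MSBDS_ONLY, so in
 * cpu_set_bug_bits():
 *   cpu_matches(NO_SSB)     -> true,  X86_BUG_SPEC_STORE_BYPASS not set
 *   cpu_matches(NO_MDS)     -> false, X86_BUG_MDS is set
 *   cpu_matches(MSBDS_ONLY) -> true,  X86_BUG_MSBDS_ONLY is set as well
 *   cpu_matches(NO_L1TF)    -> true,  the function returns before L1TF
 */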
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index d39cfb2..311d0fa 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -121,7 +121,7 @@
 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
 
 	/* Load/Store Serialize to mem access disable (=reorder it) */
-	setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
+	setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
 	/* set load/store serialize from 1GB to 4GB */
 	ccr3 |= 0xe0;
 	setCx86(CX86_CCR3, ccr3);
@@ -132,11 +132,11 @@
 	pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 
 	/* CCR2 bit 2: unlock NW bit */
-	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
+	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
 	/* set 'Not Write-through' */
 	write_cr0(read_cr0() | X86_CR0_NW);
 	/* CCR2 bit 2: lock NW bit and set WT1 */
-	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
+	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
 }
 
 /*
@@ -150,14 +150,14 @@
 	local_irq_save(flags);
 
 	/* Suspend on halt power saving and enable #SUSP pin */
-	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
+	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
 
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
 
 
 	/* FPU fast, DTE cache, Mem bypass */
-	setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
+	setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
 	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
 
 	set_cx86_memwb();
@@ -293,7 +293,7 @@
 		/* GXm supports extended cpuid levels 'ala' AMD */
 		if (c->cpuid_level == 2) {
 			/* Enable cxMMX extensions (GX1 Datasheet 54) */
-			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
+			setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
 
 			/*
 			 * GXm : 0x30 ... 0x5f GXm  datasheet 51
@@ -316,7 +316,7 @@
 		if (dir1 > 7) {
 			dir0_msn++;  /* M II */
 			/* Enable MMX extensions (App note 108) */
-			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
+			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
 		} else {
 			/* A 6x86MX - it has the bug. */
 			set_cpu_bug(c, X86_BUG_COMA);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index cee0fec..860f2fd 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -14,6 +14,7 @@
 #include <asm/bugs.h>
 #include <asm/cpu.h>
 #include <asm/intel-family.h>
+#include <asm/microcode_intel.h>
 
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -137,14 +138,8 @@
 		(c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
-	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
-		unsigned lower_word;
-
-		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
-		/* Required by the SDM */
-		sync_core();
-		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
-	}
+	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+		c->microcode = intel_get_microcode_revision();
 
 	/* Now if any of them are set, check the blacklist and clear the lot */
 	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
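
Editor's aside: intel_get_microcode_revision() itself is not part of this
diff. Judging from the open-coded sequence it replaces here and in
microcode/intel.c below, the helper in <asm/microcode_intel.h> presumably
reads along these lines (a sketch, not the verbatim header):

static inline u32 intel_get_microcode_revision(void)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_IA32_UCODE_REV, 0);

	/* As documented in the SDM: Do a CPUID 1 here */
	sync_core();

	/* Get the current revision from MSR 0x8B */
	native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);

	return rev;
}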
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 3e0199e..0372913e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -148,6 +148,11 @@
 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
 		KERNEL
 		),
+	MCESEV(
+		PANIC, "Instruction fetch error in kernel",
+		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+		KERNEL
+		),
 #endif
 	MCESEV(
 		PANIC, "Action required: unknown MCACOD",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 25310d2..d9ad49c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -139,6 +139,8 @@
 	m->socketid = cpu_data(m->extcpu).phys_proc_id;
 	m->apicid = cpu_data(m->extcpu).initial_apicid;
 	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+
+	m->microcode = boot_cpu_data.microcode;
 }
 
 DEFINE_PER_CPU(struct mce, injectm);
@@ -309,7 +311,7 @@
 	 */
 	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
-		cpu_data(m->extcpu).microcode);
+		m->microcode);
 
 	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 732bb03..a19fddf 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -707,22 +707,26 @@
 		return -1;
 
 	/* need to apply patch? */
-	if (rev >= mc_amd->hdr.patch_id) {
-		c->microcode = rev;
-		uci->cpu_sig.rev = rev;
-		return 0;
-	}
+	if (rev >= mc_amd->hdr.patch_id)
+		goto out;
 
 	if (__apply_microcode_amd(mc_amd)) {
 		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
 			cpu, mc_amd->hdr.patch_id);
 		return -1;
 	}
-	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
-		mc_amd->hdr.patch_id);
 
-	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
-	c->microcode = mc_amd->hdr.patch_id;
+	rev = mc_amd->hdr.patch_id;
+
+	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
+
+out:
+	uci->cpu_sig.rev = rev;
+	c->microcode	 = rev;
+
+	/* Update boot_cpu_data's revision too, if we're on the BSP: */
+	if (c->cpu_index == boot_cpu_data.cpu_index)
+		boot_cpu_data.microcode = rev;
 
 	return 0;
 }
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 79291d6..1308abf 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -386,15 +386,8 @@
 		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
 		csig.pf = 1 << ((val[1] >> 18) & 7);
 	}
-	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
-	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
-
-	/* get the current revision from MSR 0x8B */
-	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-
-	csig.rev = val[1];
+	csig.rev = intel_get_microcode_revision();
 
 	uci->cpu_sig = csig;
 	uci->valid = 1;
@@ -618,29 +611,35 @@
 static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 {
 	struct microcode_intel *mc;
-	unsigned int val[2];
+	u32 rev;
 
 	mc = uci->mc;
 	if (!mc)
 		return 0;
 
+	/*
+	 * Save us the MSR write below - which is a particularly expensive
+	 * operation - when the other hyperthread has already updated the
+	 * microcode.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		return 0;
+	}
+
 	/* write microcode via MSR 0x79 */
 	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
-	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
-
-	/* get the current revision from MSR 0x8B */
-	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-	if (val[1] != mc->hdr.rev)
+	rev = intel_get_microcode_revision();
+	if (rev != mc->hdr.rev)
 		return -1;
 
 #ifdef CONFIG_X86_64
 	/* Flush global tlb. This is precaution. */
 	flush_tlb_early();
 #endif
-	uci->cpu_sig.rev = val[1];
+	uci->cpu_sig.rev = rev;
 
 	if (early)
 		print_ucode(uci);
@@ -903,9 +902,9 @@
 {
 	struct microcode_intel *mc;
 	struct ucode_cpu_info *uci;
-	struct cpuinfo_x86 *c;
-	unsigned int val[2];
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	static int prev_rev;
+	u32 rev;
 
 	/* We should bind the task to the CPU */
 	if (WARN_ON(raw_smp_processor_id() != cpu))
@@ -924,35 +923,42 @@
 	if (!get_matching_mc(mc, cpu))
 		return 0;
 
+	/*
+	 * Save us the MSR write below - which is a particularly expensive
+	 * operation - when the other hyperthread has already updated the
+	 * microcode.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev)
+		goto out;
+
 	/* write microcode via MSR 0x79 */
 	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-	wrmsrl(MSR_IA32_UCODE_REV, 0);
 
-	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
+	rev = intel_get_microcode_revision();
 
-	/* get the current revision from MSR 0x8B */
-	rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-
-	if (val[1] != mc->hdr.rev) {
+	if (rev != mc->hdr.rev) {
 		pr_err("CPU%d update to revision 0x%x failed\n",
 		       cpu, mc->hdr.rev);
 		return -1;
 	}
 
-	if (val[1] != prev_rev) {
+	if (rev != prev_rev) {
 		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
-			val[1],
+			rev,
 			mc->hdr.date & 0xffff,
 			mc->hdr.date >> 24,
 			(mc->hdr.date >> 16) & 0xff);
-		prev_rev = val[1];
+		prev_rev = rev;
 	}
 
-	c = &cpu_data(cpu);
+out:
+	uci->cpu_sig.rev = rev;
+	c->microcode	 = rev;
 
-	uci->cpu_sig.rev = val[1];
-	c->microcode = val[1];
+	/* Update boot_cpu_data's revision too, if we're on the BSP: */
+	if (c->cpu_index == boot_cpu_data.cpu_index)
+		boot_cpu_data.microcode = rev;
 
 	return 0;
 }
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 756634f..775c23d 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -914,6 +914,8 @@
 		return 0;
 
 	hpet_set_mapping();
+	if (!hpet_virt_address)
+		return 0;
 
 	/*
 	 * Read the period and check for a sane value:
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 8771766..9954a60 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -352,6 +352,7 @@
 #endif
 	default:
 		WARN_ON_ONCE(1);
+		return -EINVAL;
 	}
 
 	/*
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 64a70b2..3f3cfec 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -545,6 +545,7 @@
 	unsigned long *sara = stack_addr(regs);
 
 	ri->ret_addr = (kprobe_opcode_t *) *sara;
+	ri->fp = sara;
 
 	/* Replace the return addr with trampoline addr */
 	*sara = (unsigned long) &kretprobe_trampoline;
@@ -746,15 +747,21 @@
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 	kprobe_opcode_t *correct_ret_addr = NULL;
+	void *frame_pointer;
+	bool skipped = false;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
 	/* fixup registers */
 #ifdef CONFIG_X86_64
 	regs->cs = __KERNEL_CS;
+	/* On x86-64, we use pt_regs->sp as the return address holder. */
+	frame_pointer = &regs->sp;
 #else
 	regs->cs = __KERNEL_CS | get_kernel_rpl();
 	regs->gs = 0;
+	/* On x86-32, we use pt_regs->flags as the return address holder. */
+	frame_pointer = &regs->flags;
 #endif
 	regs->ip = trampoline_address;
 	regs->orig_ax = ~0UL;
@@ -776,8 +783,25 @@
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
+		/*
+		 * Return probes must be pushed on this hash list in the
+		 * correct order (same as return order) so that they can be
+		 * popped correctly. However, if we find an entry pushed in
+		 * the wrong order, we have found a function which should
+		 * not be probed, because the out-of-order entry was pushed
+		 * while another kretprobe was itself being processed.
+		 */
+		if (ri->fp != frame_pointer) {
+			if (!skipped)
+				pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
+			skipped = true;
+			continue;
+		}
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
+		if (skipped)
+			pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
+				ri->rp->kp.addr);
 
 		if (orig_ret_address != trampoline_address)
 			/*
@@ -795,6 +819,8 @@
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
+		if (ri->fp != frame_pointer)
+			continue;
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index bfe4d6c..6b7b35d 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -32,6 +32,7 @@
 #include <asm/x86_init.h>
 #include <asm/reboot.h>
 #include <asm/cache.h>
+#include <asm/nospec-branch.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/nmi.h>
@@ -544,6 +545,9 @@
 		write_cr2(this_cpu_read(nmi_cr2));
 	if (this_cpu_dec_return(nmi_state))
 		goto nmi_restart;
+
+	if (user_mode(regs))
+		mds_user_clear_cpu_buffers();
 }
 NOKPROBE_SYMBOL(do_nmi);
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e9195a1..f1f3c47 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -35,6 +35,8 @@
 #include <asm/switch_to.h>
 #include <asm/spec-ctrl.h>
 
+#include "process.h"
+
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -170,11 +172,12 @@
 	return 0;
 }
 
-static inline void switch_to_bitmap(struct tss_struct *tss,
-				    struct thread_struct *prev,
+static inline void switch_to_bitmap(struct thread_struct *prev,
 				    struct thread_struct *next,
 				    unsigned long tifp, unsigned long tifn)
 {
+	struct tss_struct *tss = this_cpu_ptr(&cpu_tss);
+
 	if (tifn & _TIF_IO_BITMAP) {
 		/*
 		 * Copy the relevant range of the IO bitmap.
@@ -308,32 +311,85 @@
 	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
 }
 
-static __always_inline void intel_set_ssb_state(unsigned long tifn)
+/*
+ * Update the MSRs managing speculation control, during context switch.
+ *
+ * tifp: Previous task's thread flags
+ * tifn: Next task's thread flags
+ */
+static __always_inline void __speculation_ctrl_update(unsigned long tifp,
+						      unsigned long tifn)
 {
-	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+	unsigned long tif_diff = tifp ^ tifn;
+	u64 msr = x86_spec_ctrl_base;
+	bool updmsr = false;
 
-	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+	/*
+	 * If TIF_SSBD is different, select the proper mitigation
+	 * method. Note that if SSBD mitigation is disabled or permanently
+	 * enabled, this branch can't be taken because nothing can set
+	 * TIF_SSBD.
+	 */
+	if (tif_diff & _TIF_SSBD) {
+		if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+			amd_set_ssb_virt_state(tifn);
+		} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+			amd_set_core_ssb_state(tifn);
+		} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+			   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+			msr |= ssbd_tif_to_spec_ctrl(tifn);
+			updmsr  = true;
+		}
+	}
+
+	/*
+	 * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
+	 * otherwise avoid the MSR write.
+	 */
+	if (IS_ENABLED(CONFIG_SMP) &&
+	    static_branch_unlikely(&switch_to_cond_stibp)) {
+		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+		msr |= stibp_tif_to_spec_ctrl(tifn);
+	}
+
+	if (updmsr)
+		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
 {
-	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
-		amd_set_ssb_virt_state(tifn);
-	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-		amd_set_core_ssb_state(tifn);
-	else
-		intel_set_ssb_state(tifn);
+	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
+		if (task_spec_ssb_disable(tsk))
+			set_tsk_thread_flag(tsk, TIF_SSBD);
+		else
+			clear_tsk_thread_flag(tsk, TIF_SSBD);
+
+		if (task_spec_ib_disable(tsk))
+			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
+		else
+			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
+	}
+	/* Return the updated thread_info flags */
+	return task_thread_info(tsk)->flags;
 }
 
-void speculative_store_bypass_update(unsigned long tif)
+void speculation_ctrl_update(unsigned long tif)
 {
+	/* Forced update. Make sure all relevant TIF flags are different */
 	preempt_disable();
-	__speculative_store_bypass_update(tif);
+	__speculation_ctrl_update(~tif, tif);
 	preempt_enable();
 }
 
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss)
+/* Called from seccomp/prctl update */
+void speculation_ctrl_update_current(void)
+{
+	preempt_disable();
+	speculation_ctrl_update(speculation_ctrl_update_tif(current));
+	preempt_enable();
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev, *next;
 	unsigned long tifp, tifn;
@@ -343,7 +399,7 @@
 
 	tifn = READ_ONCE(task_thread_info(next_p)->flags);
 	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
-	switch_to_bitmap(tss, prev, next, tifp, tifn);
+	switch_to_bitmap(prev, next, tifp, tifn);
 
 	propagate_user_return_notify(prev_p, next_p);
 
@@ -361,8 +417,15 @@
 	if ((tifp ^ tifn) & _TIF_NOTSC)
 		cr4_toggle_bits(X86_CR4_TSD);
 
-	if ((tifp ^ tifn) & _TIF_SSBD)
-		__speculative_store_bypass_update(tifn);
+	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
+		__speculation_ctrl_update(tifp, tifn);
+	} else {
+		speculation_ctrl_update_tif(prev_p);
+		tifn = speculation_ctrl_update_tif(next_p);
+
+		/* Enforce MSR update to ensure consistent state */
+		__speculation_ctrl_update(~tifn, tifn);
+	}
 }
 
 /*
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
new file mode 100644
index 0000000..898e97c
--- /dev/null
+++ b/arch/x86/kernel/process.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Code shared between 32 and 64 bit
+
+#include <asm/spec-ctrl.h>
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
+
+/*
+ * This needs to be inline to optimize for the common case where no extra
+ * work needs to be done.
+ */
+static inline void switch_to_extra(struct task_struct *prev,
+				   struct task_struct *next)
+{
+	unsigned long next_tif = task_thread_info(next)->flags;
+	unsigned long prev_tif = task_thread_info(prev)->flags;
+
+	if (IS_ENABLED(CONFIG_SMP)) {
+		/*
+		 * Avoid __switch_to_xtra() invocation when conditional
+		 * STIPB is disabled and the only different bit is
+		 * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
+		 * in the TIF_WORK_CTXSW masks.
+		 */
+		if (!static_branch_likely(&switch_to_cond_stibp)) {
+			prev_tif &= ~_TIF_SPEC_IB;
+			next_tif &= ~_TIF_SPEC_IB;
+		}
+	}
+
+	/*
+	 * __switch_to_xtra() handles debug registers, i/o bitmaps,
+	 * speculation mitigations etc.
+	 */
+	if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
+		     prev_tif & _TIF_WORK_CTXSW_PREV))
+		__switch_to_xtra(prev, next);
+}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8e..4ca26fc 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -55,6 +55,8 @@
 #include <asm/switch_to.h>
 #include <asm/vm86.h>
 
+#include "process.h"
+
 void __show_regs(struct pt_regs *regs, int all)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
@@ -127,6 +129,13 @@
 	struct task_struct *tsk;
 	int err;
 
+	/*
+	 * For a new task use the RESET flags value since there is no prior state.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
@@ -264,12 +273,7 @@
 	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
 		set_iopl_mask(next->iopl);
 
-	/*
-	 * Now maybe handle debug registers and/or IO bitmaps
-	 */
-	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
-		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
-		__switch_to_xtra(prev_p, next_p, tss);
+	switch_to_extra(prev_p, next_p);
 
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index a266181..6d6c15c 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -51,6 +51,8 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/vdso.h>
 
+#include "process.h"
+
 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
 /* Prints also some state that isn't saved in the pt_regs */
@@ -266,6 +268,14 @@
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
+
+	/*
+	 * For a new task use the RESET flags value since there is no prior state.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
@@ -454,12 +464,7 @@
 	/* Reload esp0 and ss1.  This changes current_thread_info(). */
 	load_sp0(tss, next);
 
-	/*
-	 * Now maybe reload the debug registers and handle I/O bitmaps
-	 */
-	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
-		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
-		__switch_to_xtra(prev_p, next_p, tss);
+	switch_to_extra(prev_p, next_p);
 
 #ifdef CONFIG_XEN
 	/*
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 4a12362..c55b11f 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -82,6 +82,19 @@
 	return 0;
 }
 
+/*
+ * Some machines don't handle the default ACPI reboot method and
+ * require the EFI reboot method:
+ */
+static int __init set_efi_reboot(const struct dmi_system_id *d)
+{
+	if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) {
+		reboot_type = BOOT_EFI;
+		pr_info("%s series board detected. Selecting EFI-method for reboot.\n", d->ident);
+	}
+	return 0;
+}
+
 void __noreturn machine_real_restart(unsigned int type)
 {
 	local_irq_disable();
@@ -167,6 +180,14 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
 		},
 	},
+	{	/* Handle reboot issue on Acer TravelMate X514-51T */
+		.callback = set_efi_reboot,
+		.ident = "Acer TravelMate X514-51T",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate X514-51T"),
+		},
+	},
 
 	/* Apple */
 	{	/* Handle problems with rebooting on Apple MacBook5 */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 769c370..cb76841 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -713,7 +713,7 @@
 		case INTEL_FAM6_KABYLAKE_DESKTOP:
 			crystal_khz = 24000;	/* 24.0 MHz */
 			break;
-		case INTEL_FAM6_ATOM_DENVERTON:
+		case INTEL_FAM6_ATOM_GOLDMONT_X:
 			crystal_khz = 25000;	/* 25.0 MHz */
 			break;
 		case INTEL_FAM6_ATOM_GOLDMONT:
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index a2456d4..9b8b3cb 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -6,6 +6,21 @@
 
 #define FRAME_HEADER_SIZE (sizeof(long) * 2)
 
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x)			\
+({							\
+	unsigned long val;				\
+	if (task == current)				\
+		val = READ_ONCE(x);			\
+	else						\
+		val = READ_ONCE_NOCHECK(x);		\
+	val;						\
+})
+
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
 	unsigned long addr;
@@ -14,7 +29,8 @@
 	if (unwind_done(state))
 		return 0;
 
-	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
+	addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
 				     addr_p);
 
 	return __kernel_text_address(addr) ? addr : 0;
@@ -48,7 +64,7 @@
 	if (unwind_done(state))
 		return false;
 
-	next_bp = (unsigned long *)*state->bp;
+	next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
 
 	/* make sure the next frame's data is accessible */
 	if (!update_stack_state(state, next_bp, FRAME_HEADER_SIZE))
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index e783a5d..55f0487 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -367,7 +367,7 @@
  * Per-cpu symbols which need to be offset from __per_cpu_load
  * for the boot processor.
  */
-#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
+#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
 INIT_PER_CPU(gdt_page);
 INIT_PER_CPU(irq_stack_union);
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index c17d389..fc8236f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -355,7 +355,8 @@
 
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-		F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
+		F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
+		F(AMD_SSB_NO) | F(AMD_STIBP);
 
 	/* cpuid 0xC0000001.edx */
 	const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -380,7 +381,8 @@
 
 	/* cpuid 7.0.edx*/
 	const u32 kvm_cpuid_7_0_edx_x86_features =
-		F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+		F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) |
+		F(INTEL_STIBP) | F(MD_CLEAR);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
@@ -633,7 +635,12 @@
 			entry->ebx |= F(VIRT_SSBD);
 		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
 		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
-		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+		/*
+		 * The preference is to use SPEC CTRL MSR instead of the
+		 * VIRT_SPEC MSR.
+		 */
+		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+		    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
 			entry->ebx |= F(VIRT_SSBD);
 		break;
 	}
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 8a841b9..b2bf8e1 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -176,7 +176,7 @@
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
-	if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS)))
+	if (best && (best->ebx & (bit(X86_FEATURE_AMD_IBRS) | bit(X86_FEATURE_AMD_SSBD))))
 		return true;
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
 	return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SPEC_CTRL_SSBD)));
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 510cfc0..b636a1e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2579,15 +2579,13 @@
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
 	 * supports long mode.
 	 */
-	cr4 = ctxt->ops->get_cr(ctxt, 4);
 	if (emulator_has_longmode(ctxt)) {
 		struct desc_struct cs_desc;
 
 		/* Zero CR4.PCIDE before CR0.PG.  */
-		if (cr4 & X86_CR4_PCIDE) {
+		cr4 = ctxt->ops->get_cr(ctxt, 4);
+		if (cr4 & X86_CR4_PCIDE)
 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
-			cr4 &= ~X86_CR4_PCIDE;
-		}
 
 		/* A 32-bit code segment is required to clear EFER.LMA.  */
 		memset(&cs_desc, 0, sizeof(cs_desc));
@@ -2601,13 +2599,16 @@
 	if (cr0 & X86_CR0_PE)
 		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
 
-	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
-	if (cr4 & X86_CR4_PAE)
-		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
+	if (emulator_has_longmode(ctxt)) {
+		/* Clear CR4.PAE before clearing EFER.LME. */
+		cr4 = ctxt->ops->get_cr(ctxt, 4);
+		if (cr4 & X86_CR4_PAE)
+			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
 
-	/* And finally go back to 32-bit mode.  */
-	efer = 0;
-	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+		/* And finally go back to 32-bit mode.  */
+		efer = 0;
+		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+	}
 
 	smbase = ctxt->ops->get_smbase(ctxt);
 	if (emulator_has_longmode(ctxt))
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 01eb045..9338136 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3704,7 +3704,7 @@
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
-		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
 			return 1;
 
 		svm->spec_ctrl = data;
@@ -3940,14 +3940,25 @@
 		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
+		int i;
+		struct kvm_vcpu *vcpu;
+		struct kvm *kvm = svm->vcpu.kvm;
 		struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
 		/*
-		 * Update ICR high and low, then emulate sending IPI,
-		 * which is handled when writing APIC_ICR.
+		 * At this point, we expect that the AVIC HW has already
+		 * set the appropriate IRR bits on the valid target
+		 * vcpus. So, we just need to kick the appropriate vcpu.
 		 */
-		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
-		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			bool m = kvm_apic_match_dest(vcpu, apic,
+						     icrl & KVM_APIC_SHORT_MASK,
+						     GET_APIC_DEST_FIELD(icrh),
+						     icrl & KVM_APIC_DEST_MASK);
+
+			if (m && !avic_vcpu_is_running(vcpu))
+				kvm_vcpu_wake_up(vcpu);
+		}
 		break;
 	}
 	case AVIC_IPI_FAILURE_INVALID_TARGET:
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 0a6cc67..ea618b7 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -434,13 +434,13 @@
 );
 
 TRACE_EVENT(kvm_apic_accept_irq,
-	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
+	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
 	    TP_ARGS(apicid, dm, tm, vec),
 
 	TP_STRUCT__entry(
 		__field(	__u32,		apicid		)
 		__field(	__u16,		dm		)
-		__field(	__u8,		tm		)
+		__field(	__u16,		tm		)
 		__field(	__u8,		vec		)
 	),
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a24d080..6e36152 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -714,7 +714,6 @@
 	u64 		      msr_guest_kernel_gs_base;
 #endif
 
-	u64 		      arch_capabilities;
 	u64 		      spec_ctrl;
 
 	u32 vm_entry_controls_shadow;
@@ -3209,12 +3208,6 @@
 
 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
 		break;
-	case MSR_IA32_ARCH_CAPABILITIES:
-		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has_arch_capabilities(vcpu))
-			return 1;
-		msr_info->data = to_vmx(vcpu)->arch_capabilities;
-		break;
 	case MSR_IA32_SYSENTER_CS:
 		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
 		break;
@@ -3376,11 +3369,6 @@
 		vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
 					      MSR_TYPE_W);
 		break;
-	case MSR_IA32_ARCH_CAPABILITIES:
-		if (!msr_info->host_initiated)
-			return 1;
-		vmx->arch_capabilities = data;
-		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
 			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -5468,8 +5456,6 @@
 		++vmx->nmsrs;
 	}
 
-	vmx->arch_capabilities = kvm_get_arch_capabilities();
-
 	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
 
 	/* 22.2.1, 20.8.1 */
@@ -5965,6 +5951,7 @@
 static int handle_triple_fault(struct kvm_vcpu *vcpu)
 {
 	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+	vcpu->mmio_needed = 0;
 	return 0;
 }
 
@@ -7046,6 +7033,10 @@
 	/* Addr = segment_base + offset */
 	/* offset = base + [index * scale] + displacement */
 	off = exit_qualification; /* holds the displacement */
+	if (addr_size == 1)
+		off = (gva_t)sign_extend64(off, 31);
+	else if (addr_size == 0)
+		off = (gva_t)sign_extend64(off, 15);
 	if (base_is_valid)
 		off += kvm_register_read(vcpu, base_reg);
 	if (index_is_valid)
@@ -7088,10 +7079,16 @@
 		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
 		 */
 		exn = (s.unusable != 0);
-		/* Protected mode: #GP(0)/#SS(0) if the memory
-		 * operand is outside the segment limit.
+
+		/*
+		 * Protected mode: #GP(0)/#SS(0) if the memory operand is
+		 * outside the segment limit.  All CPUs that support VMX ignore
+		 * limit checks for flat segments, i.e. segments with base==0,
+		 * limit==0xffffffff and of type expand-up data or code.
 		 */
-		exn = exn || (off + sizeof(u64) > s.limit);
+		if (!(s.base == 0 && s.limit == 0xffffffff &&
+		     ((s.type & 8) || !(s.type & 4))))
+			exn = exn || (off + sizeof(u64) > s.limit);
 	}
 	if (exn) {
 		kvm_queue_exception_e(vcpu,
@@ -9208,8 +9205,11 @@
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 
+	/* L1D Flush includes CPU buffer clear to mitigate MDS */
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
+	else if (static_branch_unlikely(&mds_user_clear))
+		mds_clear_cpu_buffers();
 
 	asm(
 		/* Store host registers */
@@ -9568,8 +9568,8 @@
 	return ERR_PTR(err);
 }
 
-#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
-#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
+#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
 
 static int vmx_vm_init(struct kvm *kvm)
 {
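For the operand-address hunk earlier in this vmx.c diff: displacements are now sign-extended to the guest address size before the base and index registers are added. A sketch of how sign_extend64() does that (reimplemented here for a standalone build; in the kernel it comes from include/linux/bitops.h):

    #include <stdio.h>
    #include <stdint.h>

    /* Sign-extend from bit "index" (the sign bit) to 64 bits. */
    static inline int64_t sign_extend64(uint64_t value, int index)
    {
            int shift = 63 - index;
            return (int64_t)(value << shift) >> shift;  /* arithmetic shift */
    }

    int main(void)
    {
            /* addr_size == 1: 32-bit addressing, extend from bit 31 */
            printf("%lld\n", (long long)sign_extend64(0xfffffff0, 31)); /* -16 */
            /* addr_size == 0: 16-bit addressing, extend from bit 15 */
            printf("%lld\n", (long long)sign_extend64(0xfff0, 15));     /* -16 */
            return 0;
    }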
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5a35fee..1f32c4e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1073,11 +1073,8 @@
 	return 0;
 }
 
-bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	if (efer & efer_reserved_bits)
-		return false;
-
 	if (efer & EFER_FFXSR) {
 		struct kvm_cpuid_entry2 *feat;
 
@@ -1095,19 +1092,33 @@
 	}
 
 	return true;
+}
+
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	if (efer & efer_reserved_bits)
+		return false;
+
+	return __kvm_valid_efer(vcpu, efer);
 }
 EXPORT_SYMBOL_GPL(kvm_valid_efer);
 
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	u64 old_efer = vcpu->arch.efer;
+	u64 efer = msr_info->data;
 
-	if (!kvm_valid_efer(vcpu, efer))
-		return 1;
+	if (efer & efer_reserved_bits)
+		return 1;
 
-	if (is_paging(vcpu)
-	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
-		return 1;
+	if (!msr_info->host_initiated) {
+		if (!__kvm_valid_efer(vcpu, efer))
+			return 1;
+
+		if (is_paging(vcpu) &&
+		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+			return 1;
+	}
 
 	efer &= ~EFER_LMA;
 	efer |= vcpu->arch.efer & EFER_LMA;
@@ -2197,8 +2208,13 @@
 		if (msr_info->host_initiated)
 			vcpu->arch.microcode_version = data;
 		break;
+	case MSR_IA32_ARCH_CAPABILITIES:
+		if (!msr_info->host_initiated)
+			return 1;
+		vcpu->arch.arch_capabilities = data;
+		break;
 	case MSR_EFER:
-		return set_efer(vcpu, data);
+		return set_efer(vcpu, msr_info);
 	case MSR_K7_HWCR:
 		data &= ~(u64)0x40;	/* ignore flush filter disable */
 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
@@ -2473,6 +2489,12 @@
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = vcpu->arch.microcode_version;
 		break;
+	case MSR_IA32_ARCH_CAPABILITIES:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has_arch_capabilities(vcpu))
+			return 1;
+		msr_info->data = vcpu->arch.arch_capabilities;
+		break;
 	case MSR_MTRRcap:
 	case 0x200 ... 0x2ff:
 		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -6769,6 +6791,7 @@
 		}
 		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+			vcpu->mmio_needed = 0;
 			r = 0;
 			goto out;
 		}
@@ -7671,6 +7694,7 @@
 {
 	int r;
 
+	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
 	kvm_vcpu_mtrr_init(vcpu);
 	r = vcpu_load(vcpu);
 	if (r)
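The set_efer() rework above boils down to one rule: reserved bits always fault, while the CPUID-derived checks are skipped for host-initiated writes so userspace can restore EFER before it restores CPUID. A reduced model (the reserved mask and the capability check are illustrative stand-ins, not the real layout):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_FFXSR (1ULL << 14)
    /* Pretend everything but SCE, LME, LMA, NXE, SVME and FFXSR is reserved. */
    #define EFER_RESERVED (~((1ULL << 0) | (1ULL << 8) | (1ULL << 10) | \
                             (1ULL << 11) | (1ULL << 12) | EFER_FFXSR))

    struct msr_write { uint64_t data; bool host_initiated; };

    static bool guest_has_ffxsr;   /* stands in for the CPUID lookup */

    static int set_efer(struct msr_write *msr)
    {
            uint64_t efer = msr->data;

            if (efer & EFER_RESERVED)
                    return 1;      /* reserved bits fault unconditionally */
            if (!msr->host_initiated &&
                (efer & EFER_FFXSR) && !guest_has_ffxsr)
                    return 1;      /* CPUID check skipped for host writes */
            return 0;              /* write accepted */
    }

    int main(void)
    {
            struct msr_write guest = { EFER_FFXSR, false };
            struct msr_write host  = { EFER_FFXSR, true  };

            printf("guest: %d host: %d\n", set_efer(&guest), set_efer(&host));
            return 0;       /* prints "guest: 1 host: 0" */
    }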
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 0da3945..f7f8aa6 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -836,7 +836,7 @@
 	if (!printk_ratelimit())
 		return;
 
-	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 		tsk->comm, task_pid_nr(tsk), address,
 		(void *)regs->ip, (void *)regs->sp, error_code);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 90801a8..ce092a6 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -790,7 +790,7 @@
 
 	pages = generic_max_swapfile_size();
 
-	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
 		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
 		unsigned long long l1tf_limit = l1tf_pfn_limit();
 		/*
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 3f729e2..12522db 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -9,6 +9,7 @@
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/cpu.h>
 
 #undef pr_fmt
 #define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt
@@ -297,7 +298,8 @@
 			goto skip;
 	}
 
-	if (cmdline_find_option_bool(boot_command_line, "nopti"))
+	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
+	    cpu_mitigations_off())
 		goto disable;
 
 skip:
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e30baa8..dff8ac2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -251,7 +251,7 @@
 		if (pgd_val(pgd) != 0) {
 			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
 
-			pgdp[i] = native_make_pgd(0);
+			pgd_clear(&pgdp[i]);
 
 			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
 			pmd_free(mm, pmd);
@@ -419,7 +419,7 @@
 	int changed = !pte_same(*ptep, entry);
 
 	if (changed && dirty) {
-		*ptep = entry;
+		set_pte(ptep, entry);
 		pte_update(vma->vm_mm, address, ptep);
 	}
 
@@ -436,7 +436,7 @@
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
 	if (changed && dirty) {
-		*pmdp = entry;
+		set_pmd(pmdp, entry);
 		/*
 		 * We had a write-protection fault here and changed the pmd
 		 * to be more permissive. No need to flush the TLB for that,
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index eac92e2..a112bb1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,6 +30,12 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+/*
+ * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
+ * stored in cpu_tlbstate.last_user_mm_ibpb.
+ */
+#define LAST_USER_MM_IBPB	0x1UL
+
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
 struct flush_tlb_info {
@@ -101,33 +107,101 @@
 	local_irq_restore(flags);
 }
 
+static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+{
+	unsigned long next_tif = task_thread_info(next)->flags;
+	unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
+
+	return (unsigned long)next->mm | ibpb;
+}
+
+static void cond_ibpb(struct task_struct *next)
+{
+	if (!next || !next->mm)
+		return;
+
+	/*
+	 * Both the conditional and the always-IBPB modes use the mm
+	 * pointer to avoid the IBPB when switching between tasks of the
+	 * same process. Using the mm pointer instead of mm->context.ctx_id
+	 * opens a hypothetical hole vs. mm_struct reuse, which is more or
+	 * less impossible for an attacker to control. Aside from that, it
+	 * would only affect the first schedule, so the theoretically
+	 * exposed data is not really interesting.
+	 */
+	if (static_branch_likely(&switch_mm_cond_ibpb)) {
+		unsigned long prev_mm, next_mm;
+
+		/*
+		 * This is a bit more complex than the always mode because
+		 * it has to handle two cases:
+		 *
+		 * 1) Switch from a user space task (potential attacker)
+		 *    which has TIF_SPEC_IB set to a user space task
+		 *    (potential victim) which has TIF_SPEC_IB not set.
+		 *
+		 * 2) Switch from a user space task (potential attacker)
+		 *    which has TIF_SPEC_IB not set to a user space task
+		 *    (potential victim) which has TIF_SPEC_IB set.
+		 *
+		 * This could be done by unconditionally issuing IBPB when
+		 * a task which has TIF_SPEC_IB set is either scheduled in
+		 * or out. However, that results in two flushes when:
+		 *
+		 * - the same user space task is scheduled out and later
+		 *   scheduled in again and only a kernel thread ran in
+		 *   between.
+		 *
+		 * - a user space task belonging to the same process is
+		 *   scheduled in after a kernel thread ran in between.
+		 *
+		 * - a user space task belonging to the same process is
+		 *   scheduled in immediately.
+		 *
+		 * Optimize this with reasonably small overhead for the
+		 * above cases. Mangle the TIF_SPEC_IB bit into the mm
+		 * pointer of the incoming task which is stored in
+		 * cpu_tlbstate.last_user_mm_ibpb for comparison.
+		 */
+		next_mm = mm_mangle_tif_spec_ib(next);
+		prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
+
+		/*
+		 * Issue IBPB only if the mm's are different and one or
+		 * both have the IBPB bit set.
+		 */
+		if (next_mm != prev_mm &&
+		    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
+			indirect_branch_prediction_barrier();
+
+		this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
+	}
+
+	if (static_branch_unlikely(&switch_mm_always_ibpb)) {
+		/*
+		 * Only flush when switching to a user space task with a
+		 * different context than the user space task which ran
+		 * last on this CPU.
+		 */
+		if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
+			indirect_branch_prediction_barrier();
+			this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
+		}
+	}
+}
+
 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			struct task_struct *tsk)
 {
 	unsigned cpu = smp_processor_id();
 
 	if (likely(prev != next)) {
-		u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
-
 		/*
 		 * Avoid user/user BTB poisoning by flushing the branch
 		 * predictor when switching between processes. This stops
 		 * one process from doing Spectre-v2 attacks on another.
-		 *
-		 * As an optimization, flush indirect branches only when
-		 * switching into processes that disable dumping. This
-		 * protects high value processes like gpg, without having
-		 * too high performance overhead. IBPB is *expensive*!
-		 *
-		 * This will not flush branches when switching into kernel
-		 * threads. It will also not flush if we switch to idle
-		 * thread and back to the same process. It will flush if we
-		 * switch to a different non-dumpable process.
 		 */
-		if (tsk && tsk->mm &&
-		    tsk->mm->context.ctx_id != last_ctx_id &&
-		    get_dumpable(tsk->mm) != SUID_DUMP_USER)
-			indirect_branch_prediction_barrier();
+		cond_ibpb(tsk);
 
 		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
 			/*
@@ -143,14 +217,6 @@
 				set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
 		}
 
-		/*
-		 * Record last user mm's context id, so we can avoid
-		 * flushing branch buffer with IBPB if we switch back
-		 * to the same user.
-		 */
-		if (next != &init_mm)
-			this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
-
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		this_cpu_write(cpu_tlbstate.active_mm, next);
 
diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c
index d49d3be..ecb5866 100644
--- a/arch/x86/platform/atom/punit_atom_debug.c
+++ b/arch/x86/platform/atom/punit_atom_debug.c
@@ -154,8 +154,8 @@
 	  (kernel_ulong_t)&drv_data }
 
 static const struct x86_cpu_id intel_punit_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
-	ICPU(INTEL_FAM6_ATOM_MERRIFIELD,  punit_device_tng),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT, punit_device_byt),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID,  punit_device_tng),
 	ICPU(INTEL_FAM6_ATOM_AIRMONT,	  punit_device_cht),
 	{}
 };
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 53cace2..054e276 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -82,12 +82,8 @@
 	/*
 	 * descriptor tables
 	 */
-#ifdef CONFIG_X86_32
 	store_idt(&ctxt->idt);
-#else
-/* CONFIG_X86_64 */
-	store_idt((struct desc_ptr *)&ctxt->idt_limit);
-#endif
+
 	/*
 	 * We save it here, but restore it only in the hibernate case.
 	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
@@ -103,22 +99,18 @@
 	/*
 	 * segment registers
 	 */
-#ifdef CONFIG_X86_32
-	savesegment(es, ctxt->es);
-	savesegment(fs, ctxt->fs);
+#ifdef CONFIG_X86_32_LAZY_GS
 	savesegment(gs, ctxt->gs);
-	savesegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
-	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
-	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
-	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
-	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+#endif
+#ifdef CONFIG_X86_64
+	savesegment(gs, ctxt->gs);
+	savesegment(fs, ctxt->fs);
+	savesegment(ds, ctxt->ds);
+	savesegment(es, ctxt->es);
 
 	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
-	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
-	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
 	mtrr_save_fixed_ranges(NULL);
 
 	rdmsrl(MSR_EFER, ctxt->efer);
@@ -178,6 +170,9 @@
 	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
 
 	syscall_init();				/* This sets MSR_*STAR and related */
+#else
+	if (boot_cpu_has(X86_FEATURE_SEP))
+		enable_sep_cpu();
 #endif
 	load_TR_desc();				/* This does ltr */
 	load_mm_ldt(current->active_mm);	/* This does lldt */
@@ -186,9 +181,12 @@
 }
 
 /**
- *	__restore_processor_state - restore the contents of CPU registers saved
- *		by __save_processor_state()
- *	@ctxt - structure to load the registers contents from
+ * __restore_processor_state - restore the contents of CPU registers saved
+ *                             by __save_processor_state()
+ * @ctxt - structure to load the registers contents from
+ *
+ * The asm code that gets us here will have restored a usable GDT, although
+ * it will be pointing to the wrong alias.
  */
 static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
@@ -211,46 +209,52 @@
 	write_cr2(ctxt->cr2);
 	write_cr0(ctxt->cr0);
 
-	/*
-	 * now restore the descriptor tables to their proper values
-	 * ltr is done i fix_processor_context().
-	 */
-#ifdef CONFIG_X86_32
+	/* Restore the IDT. */
 	load_idt(&ctxt->idt);
-#else
-/* CONFIG_X86_64 */
-	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
-#endif
 
 	/*
-	 * segment registers
+	 * Just in case the asm code got us here with the SS, DS, or ES
+	 * out of sync with the GDT, update them.
 	 */
-#ifdef CONFIG_X86_32
+	loadsegment(ss, __KERNEL_DS);
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
+
+	/*
+	 * Restore percpu access.  Percpu access can happen in exception
+	 * handlers or in complicated helpers like load_gs_index().
+	 */
+#ifdef CONFIG_X86_64
+	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+#else
+	loadsegment(fs, __KERNEL_PERCPU);
+	loadsegment(gs, __KERNEL_STACK_CANARY);
+#endif
+
+	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
+	fix_processor_context();
+
+	/*
+	 * Now that we have descriptor tables fully restored and working
+	 * exception handling, restore the usermode segments.
+	 */
+#ifdef CONFIG_X86_64
+	loadsegment(ds, ctxt->ds);
 	loadsegment(es, ctxt->es);
 	loadsegment(fs, ctxt->fs);
-	loadsegment(gs, ctxt->gs);
-	loadsegment(ss, ctxt->ss);
+	load_gs_index(ctxt->gs);
 
 	/*
-	 * sysenter MSRs
+	 * Restore FSBASE and GSBASE after restoring the selectors, since
+	 * restoring the selectors clobbers the bases.  Keep in mind
+	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
 	 */
-	if (boot_cpu_has(X86_FEATURE_SEP))
-		enable_sep_cpu();
-#else
-/* CONFIG_X86_64 */
-	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
-	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
-	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
-	load_gs_index(ctxt->gs);
-	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
-
 	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
-	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+#elif defined(CONFIG_X86_32_LAZY_GS)
+	loadsegment(gs, ctxt->gs);
 #endif
 
-	fix_processor_context();
-
 	do_fpu_end();
 	x86_platform.restore_sched_clock_state();
 	mtrr_bp_restore();
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 25012ab..ce5f431 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -47,7 +47,7 @@
 targets += realmode.lds
 $(obj)/realmode.lds: $(obj)/pasyms.h
 
-LDFLAGS_realmode.elf := --emit-relocs -T
+LDFLAGS_realmode.elf := -m elf_i386 --emit-relocs -T
 CPPFLAGS_realmode.lds += -P -C -I$(objtree)/$(obj)
 
 targets += realmode.elf
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
index 7538d80..4835930 100644
--- a/arch/xtensa/kernel/stacktrace.c
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -272,10 +272,14 @@
 	return 1;
 }
 
+/*
+ * level == 0 is for the return address from the caller of this function,
+ * not from this function itself.
+ */
 unsigned long return_address(unsigned level)
 {
 	struct return_addr_data r = {
-		.skip = level + 1,
+		.skip = level,
 	};
 	walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
 	return r.addr;
diff --git a/block/bio.c b/block/bio.c
index 8bd6e0d..7c0f0d1 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1230,8 +1230,11 @@
 			}
 		}
 
-		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+			if (!map_data)
+				__free_page(page);
 			break;
+		}
 
 		len -= bytes;
 		offset = 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6a90155..ab70b1a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -222,6 +222,7 @@
 
 	unsigned int weight;
 	unsigned int leaf_weight;
+	u64 group_idle;
 };
 
 /* This is per cgroup per device grouping structure */
@@ -307,6 +308,7 @@
 	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 	struct cfq_queue *async_idle_cfqq;
 
+	u64 group_idle;
 };
 
 struct cfq_io_cq {
@@ -803,6 +805,17 @@
 
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 
+static inline u64 get_group_idle(struct cfq_data *cfqd)
+{
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	struct cfq_queue *cfqq = cfqd->active_queue;
+
+	if (cfqq && cfqq->cfqg)
+		return cfqq->cfqg->group_idle;
+#endif
+	return cfqd->cfq_group_idle;
+}
+
 #define cfq_log(cfqd, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
@@ -823,7 +836,7 @@
 	if (!sample_valid(ttime->ttime_samples))
 		return false;
 	if (group_idle)
-		slice = cfqd->cfq_group_idle;
+		slice = get_group_idle(cfqd);
 	else
 		slice = cfqd->cfq_slice_idle;
 	return ttime->ttime_mean > slice;
@@ -1626,6 +1639,7 @@
 
 	cgd->weight = weight;
 	cgd->leaf_weight = weight;
+	cgd->group_idle = cfq_group_idle;
 }
 
 static void cfq_cpd_free(struct blkcg_policy_data *cpd)
@@ -1670,6 +1684,7 @@
 
 	cfqg->weight = cgd->weight;
 	cfqg->leaf_weight = cgd->leaf_weight;
+	cfqg->group_idle = cgd->group_idle;
 }
 
 static void cfq_pd_offline(struct blkg_policy_data *pd)
@@ -1791,6 +1806,19 @@
 	return 0;
 }
 
+static int cfq_print_group_idle(struct seq_file *sf, void *v)
+{
+	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+	struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
+	u64 val = 0;
+
+	if (cgd)
+		val = cgd->group_idle;
+
+	seq_printf(sf, "%llu\n", div_u64(val, NSEC_PER_USEC));
+	return 0;
+}
+
 static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
 					char *buf, size_t nbytes, loff_t off,
 					bool on_dfl, bool is_leaf_weight)
@@ -1912,6 +1940,37 @@
 	return __cfq_set_weight(css, val, false, false, true);
 }
 
+static int cfq_set_group_idle(struct cgroup_subsys_state *css,
+			       struct cftype *cft, u64 val)
+{
+	struct blkcg *blkcg = css_to_blkcg(css);
+	struct cfq_group_data *cfqgd;
+	struct blkcg_gq *blkg;
+	int ret = 0;
+
+	spin_lock_irq(&blkcg->lock);
+	cfqgd = blkcg_to_cfqgd(blkcg);
+	if (!cfqgd) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	cfqgd->group_idle = val * NSEC_PER_USEC;
+
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+		if (!cfqg)
+			continue;
+
+		cfqg->group_idle = cfqgd->group_idle;
+	}
+
+out:
+	spin_unlock_irq(&blkcg->lock);
+	return ret;
+}
+
 static int cfqg_print_stat(struct seq_file *sf, void *v)
 {
 	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
@@ -2057,6 +2116,11 @@
 		.seq_show = cfq_print_leaf_weight,
 		.write_u64 = cfq_set_leaf_weight,
 	},
+	{
+		.name = "group_idle",
+		.seq_show = cfq_print_group_idle,
+		.write_u64 = cfq_set_group_idle,
+	},
 
 	/* statistics, covers only the tasks in the cfqg */
 	{
@@ -2952,7 +3016,7 @@
 	 * otherwise we still have a problem with sync vs async workloads.
 	 */
 	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
-		!cfqd->cfq_group_idle)
+		!get_group_idle(cfqd))
 		return;
 
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
@@ -2963,9 +3027,8 @@
 	 */
 	if (!cfq_should_idle(cfqd, cfqq)) {
 		/* no queue idling. Check for group idling */
-		if (cfqd->cfq_group_idle)
-			group_idle = cfqd->cfq_group_idle;
-		else
+		group_idle = get_group_idle(cfqd);
+		if (!group_idle)
 			return;
 	}
 
@@ -3006,7 +3069,7 @@
 	cfq_mark_cfqq_wait_request(cfqq);
 
 	if (group_idle)
-		sl = cfqd->cfq_group_idle;
+		sl = group_idle;
 	else
 		sl = cfqd->cfq_slice_idle;
 
@@ -3355,7 +3418,7 @@
 	 * this group, wait for requests to complete.
 	 */
 check_group_idle:
-	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
+	if (get_group_idle(cfqd) && cfqq->cfqg->nr_cfqq == 1 &&
 	    cfqq->cfqg->dispatched &&
 	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
 		cfqq = NULL;
@@ -3914,7 +3977,7 @@
 			cfqd->cfq_slice_idle);
 	}
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
+	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, get_group_idle(cfqd));
 #endif
 }
 
@@ -4308,7 +4371,7 @@
 		if (cfq_should_wait_busy(cfqd, cfqq)) {
 			u64 extend_sl = cfqd->cfq_slice_idle;
 			if (!cfqd->cfq_slice_idle)
-				extend_sl = cfqd->cfq_group_idle;
+				extend_sl = get_group_idle(cfqd);
 			cfqq->slice_end = now + extend_sl;
 			cfq_mark_cfqq_wait_busy(cfqq);
 			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
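The recurring cfqd->cfq_group_idle to get_group_idle(cfqd) substitution in this file is just a lookup with a fallback: prefer the active queue's per-cgroup value, else the global tunable. A reduced sketch of the structures involved:

    #include <stdio.h>

    typedef unsigned long long u64;

    struct cfq_group { u64 group_idle; };
    struct cfq_queue { struct cfq_group *cfqg; };
    struct cfq_data  { struct cfq_queue *active_queue; u64 cfq_group_idle; };

    static u64 get_group_idle(struct cfq_data *cfqd)
    {
            struct cfq_queue *cfqq = cfqd->active_queue;

            if (cfqq && cfqq->cfqg)
                    return cfqq->cfqg->group_idle;   /* per-cgroup override */
            return cfqd->cfq_group_idle;             /* global default */
    }

    int main(void)
    {
            struct cfq_group grp = { 8000 };
            struct cfq_queue q   = { &grp };
            struct cfq_data d    = { &q, 2000 };

            printf("%llu\n", get_group_idle(&d));    /* 8000 */
            d.active_queue = NULL;
            printf("%llu\n", get_group_idle(&d));    /* 2000 */
            return 0;
    }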
diff --git a/build.config.cuttlefish.aarch64 b/build.config.cuttlefish.aarch64
index 7bf19df..428186ff 100644
--- a/build.config.cuttlefish.aarch64
+++ b/build.config.cuttlefish.aarch64
@@ -6,7 +6,7 @@
 EXTRA_CMDS=''
 KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r349610/bin
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r353983c/bin
 LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
 FILES="
 arch/arm64/boot/Image.gz
diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64
index a25afa0..7a79036 100644
--- a/build.config.cuttlefish.x86_64
+++ b/build.config.cuttlefish.x86_64
@@ -6,7 +6,7 @@
 EXTRA_CMDS=''
 KERNEL_DIR=common
 POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r349610/bin
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r353983c/bin
 LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
 FILES="
 arch/x86/boot/bzImage
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 90d73a2..9a4e877 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -85,17 +85,17 @@
 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 {
 	unsigned int alignmask = walk->alignmask;
-	unsigned int nbytes = walk->entrylen;
 
 	walk->data -= walk->offset;
 
-	if (nbytes && walk->offset & alignmask && !err) {
-		walk->offset = ALIGN(walk->offset, alignmask + 1);
-		nbytes = min(nbytes,
-			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
-		walk->entrylen -= nbytes;
+	if (walk->entrylen && (walk->offset & alignmask) && !err) {
+		unsigned int nbytes;
 
+		walk->offset = ALIGN(walk->offset, alignmask + 1);
+		nbytes = min(walk->entrylen,
+			     (unsigned int)(PAGE_SIZE - walk->offset));
 		if (nbytes) {
+			walk->entrylen -= nbytes;
 			walk->data += walk->offset;
 			return nbytes;
 		}
@@ -115,7 +115,7 @@
 	if (err)
 		return err;
 
-	if (nbytes) {
+	if (walk->entrylen) {
 		walk->offset = 0;
 		walk->pg++;
 		return hash_walk_next(walk);
@@ -189,6 +189,21 @@
 	return ret;
 }
 
+static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
+			  unsigned int keylen)
+{
+	return -ENOSYS;
+}
+
+static void ahash_set_needkey(struct crypto_ahash *tfm)
+{
+	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+
+	if (tfm->setkey != ahash_nosetkey &&
+	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
+}
+
 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 			unsigned int keylen)
 {
@@ -200,20 +215,16 @@
 	else
 		err = tfm->setkey(tfm, key, keylen);
 
-	if (err)
+	if (unlikely(err)) {
+		ahash_set_needkey(tfm);
 		return err;
+	}
 
 	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
 
-static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
-			  unsigned int keylen)
-{
-	return -ENOSYS;
-}
-
 static inline unsigned int ahash_align_buffer_size(unsigned len,
 						   unsigned long mask)
 {
@@ -482,8 +493,7 @@
 
 	if (alg->setkey) {
 		hash->setkey = alg->setkey;
-		if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
-			crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
+		ahash_set_needkey(hash);
 	}
 	if (alg->export)
 		hash->export = alg->export;
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index c0f699c..e41f3d2 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -647,8 +647,8 @@
 
 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "%s(%s,%s)", name, chacha_name,
-		     poly_name) >= CRYPTO_MAX_ALG_NAME)
+		     "%s(%s,%s)", name, chacha->base.cra_name,
+		     poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s,%s)", name, chacha->base.cra_driver_name,
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 8e94e29..d08048a 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -65,10 +65,9 @@
 	return 0;
 }
 
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
 {
-	*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+	*(__u16 *)out = crc_t10dif_generic(crc, data, len);
 	return 0;
 }
 
@@ -77,15 +76,13 @@
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	return __chksum_finup(&ctx->crc, data, len, out);
+	return __chksum_finup(ctx->crc, data, len, out);
 }
 
 static int chksum_digest(struct shash_desc *desc, const u8 *data,
 			 unsigned int length, u8 *out)
 {
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
+	return __chksum_finup(0, data, length, out);
 }
 
 static struct shash_alg alg = {
diff --git a/crypto/gcm.c b/crypto/gcm.c
index dd33fbd..398f048 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -616,7 +616,6 @@
 
 static int crypto_gcm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
-				    const char *full_name,
 				    const char *ctr_name,
 				    const char *ghash_name)
 {
@@ -657,7 +656,8 @@
 		goto err_free_inst;
 
 	err = -EINVAL;
-	if (ghash->digestsize != 16)
+	if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
+	    ghash->digestsize != 16)
 		goto err_drop_ghash;
 
 	crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
@@ -669,24 +669,24 @@
 
 	ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
 
-	/* We only support 16-byte blocks. */
-	if (crypto_skcipher_alg_ivsize(ctr) != 16)
-		goto out_put_ctr;
-
-	/* Not a stream cipher? */
+	/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
 	err = -EINVAL;
-	if (ctr->base.cra_blocksize != 1)
+	if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+	    crypto_skcipher_alg_ivsize(ctr) != 16 ||
+	    ctr->base.cra_blocksize != 1)
 		goto out_put_ctr;
 
 	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+		goto out_put_ctr;
+
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "gcm_base(%s,%s)", ctr->base.cra_driver_name,
 		     ghash_alg->cra_driver_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 		goto out_put_ctr;
 
-	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
 	inst->alg.base.cra_flags = (ghash->base.cra_flags |
 				    ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (ghash->base.cra_priority +
@@ -728,7 +728,6 @@
 {
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -738,12 +737,7 @@
 	    CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
-	    CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_gcm_create_common(tmpl, tb, full_name,
-					ctr_name, "ghash");
+	return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
 }
 
 static struct crypto_template crypto_gcm_tmpl = {
@@ -757,7 +751,6 @@
 {
 	const char *ctr_name;
 	const char *ghash_name;
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
@@ -767,12 +760,7 @@
 	if (IS_ERR(ghash_name))
 		return PTR_ERR(ghash_name);
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
-		     ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_gcm_create_common(tmpl, tb, full_name,
-					ctr_name, ghash_name);
+	return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
 }
 
 static struct crypto_template crypto_gcm_base_tmpl = {
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index f654965..de81f71 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -52,7 +52,7 @@
 	unsigned int nbytes = walk->nbytes;
 	u8 *src = walk->src.virt.addr;
 	u8 *dst = walk->dst.virt.addr;
-	u8 *iv = walk->iv;
+	u8 * const iv = walk->iv;
 
 	do {
 		crypto_xor(iv, src, bsize);
@@ -76,7 +76,7 @@
 	int bsize = crypto_cipher_blocksize(tfm);
 	unsigned int nbytes = walk->nbytes;
 	u8 *src = walk->src.virt.addr;
-	u8 *iv = walk->iv;
+	u8 * const iv = walk->iv;
 	u8 tmpbuf[bsize];
 
 	do {
@@ -89,8 +89,6 @@
 		src += bsize;
 	} while ((nbytes -= bsize) >= bsize);
 
-	memcpy(walk->iv, iv, bsize);
-
 	return nbytes;
 }
 
@@ -130,7 +128,7 @@
 	unsigned int nbytes = walk->nbytes;
 	u8 *src = walk->src.virt.addr;
 	u8 *dst = walk->dst.virt.addr;
-	u8 *iv = walk->iv;
+	u8 * const iv = walk->iv;
 
 	do {
 		fn(crypto_cipher_tfm(tfm), dst, src);
@@ -142,8 +140,6 @@
 		dst += bsize;
 	} while ((nbytes -= bsize) >= bsize);
 
-	memcpy(walk->iv, iv, bsize);
-
 	return nbytes;
 }
 
@@ -156,7 +152,7 @@
 	int bsize = crypto_cipher_blocksize(tfm);
 	unsigned int nbytes = walk->nbytes;
 	u8 *src = walk->src.virt.addr;
-	u8 *iv = walk->iv;
+	u8 * const iv = walk->iv;
 	u8 tmpbuf[bsize];
 
 	do {
@@ -169,8 +165,6 @@
 		src += bsize;
 	} while ((nbytes -= bsize) >= bsize);
 
-	memcpy(walk->iv, iv, bsize);
-
 	return nbytes;
 }
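The pcbc.c change works because the loops chain through walk->iv in place: the pointer never has to move, so declaring it "u8 * const" documents that, and the trailing memcpy back into walk->iv was redundant. A toy PCBC encryptor showing the same chaining (the block cipher is a stand-in byte rotation, not a real cipher):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define BSIZE 4

    static void xor_block(uint8_t *dst, const uint8_t *src)
    {
            for (int i = 0; i < BSIZE; i++)
                    dst[i] ^= src[i];
    }

    /* Toy "cipher": rotate each byte left by one; enough to show chaining. */
    static void encrypt_block(uint8_t *dst, const uint8_t *src)
    {
            for (int i = 0; i < BSIZE; i++)
                    dst[i] = (uint8_t)((src[i] << 1) | (src[i] >> 7));
    }

    static void pcbc_encrypt(uint8_t *dst, const uint8_t *src,
                             size_t nbytes, uint8_t *walk_iv)
    {
            uint8_t * const iv = walk_iv;  /* chained in place, pointer fixed */

            while (nbytes >= BSIZE) {
                    xor_block(iv, src);        /* iv ^= P                  */
                    encrypt_block(dst, iv);    /* C = E(P ^ iv)            */
                    memcpy(iv, dst, BSIZE);
                    xor_block(iv, src);        /* iv = C ^ P for next block */
                    src += BSIZE; dst += BSIZE; nbytes -= BSIZE;
            }
            /* No copy-back needed: walk_iv already holds the chain value. */
    }

    int main(void)
    {
            uint8_t iv[BSIZE] = { 1, 2, 3, 4 };
            uint8_t pt[8] = "ABCDEFG";
            uint8_t ct[8];

            pcbc_encrypt(ct, pt, sizeof(ct), iv);
            for (int i = 0; i < 8; i++)
                    printf("%02x", ct[i]);
            printf("\n");
            return 0;
    }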
 
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index d7da0ee..319d996 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -186,7 +186,7 @@
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt_block(desc, &walk, 64);
 
-	salsa20_ivsetup(ctx, walk.iv);
+	salsa20_ivsetup(ctx, desc->info);
 
 	while (walk.nbytes >= 64) {
 		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
diff --git a/crypto/shash.c b/crypto/shash.c
index 4f047c7..a1c7609 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -52,6 +52,13 @@
 	return err;
 }
 
+static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
+{
+	if (crypto_shash_alg_has_setkey(alg) &&
+	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
+}
+
 int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
 			unsigned int keylen)
 {
@@ -64,8 +71,10 @@
 	else
 		err = shash->setkey(tfm, key, keylen);
 
-	if (err)
+	if (unlikely(err)) {
+		shash_set_needkey(tfm, shash);
 		return err;
+	}
 
 	crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
 	return 0;
@@ -367,7 +376,8 @@
 	crt->final = shash_async_final;
 	crt->finup = shash_async_finup;
 	crt->digest = shash_async_digest;
-	crt->setkey = shash_async_setkey;
+	if (crypto_shash_alg_has_setkey(alg))
+		crt->setkey = shash_async_setkey;
 
 	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
 				    CRYPTO_TFM_NEED_KEY);
@@ -389,9 +399,7 @@
 
 	hash->descsize = alg->descsize;
 
-	if (crypto_shash_alg_has_setkey(alg) &&
-	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
-		crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
+	shash_set_needkey(hash, alg);
 
 	return 0;
 }
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 4a349b7..125669f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -4527,7 +4527,49 @@
 		.psize		= 80,
 		.digest		= "\x13\x00\x00\x00\x00\x00\x00\x00"
 				  "\x00\x00\x00\x00\x00\x00\x00\x00",
-	},
+	}, { /* Regression test for overflow in AVX2 implementation */
+		.plaintext	= "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff\xff\xff\xff\xff"
+				  "\xff\xff\xff\xff",
+		.psize		= 300,
+		.digest		= "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
+				  "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
+	}
 };
 
 /* NHPoly1305 test vectors from https://github.com/google/adiantum */
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 957d3fa..8e38249 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -243,7 +243,7 @@
 #define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
 
 static const struct x86_cpu_id lpss_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT1),	/* Valleyview, Bay Trail */
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT),	/* Valleyview, Bay Trail */
 	ICPU(INTEL_FAM6_ATOM_AIRMONT),	/* Braswell, Cherry Trail */
 	{}
 };
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 667dc5c..ea05731 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2069,21 +2069,29 @@
 	return opregion;
 }
 
+/* Check if the chassis-type indicates there is no builtin LCD panel */
 static bool dmi_is_desktop(void)
 {
 	const char *chassis_type;
+	unsigned long type;
 
 	chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
 	if (!chassis_type)
 		return false;
 
-	if (!strcmp(chassis_type, "3") || /*  3: Desktop */
-	    !strcmp(chassis_type, "4") || /*  4: Low Profile Desktop */
-	    !strcmp(chassis_type, "5") || /*  5: Pizza Box */
-	    !strcmp(chassis_type, "6") || /*  6: Mini Tower */
-	    !strcmp(chassis_type, "7") || /*  7: Tower */
-	    !strcmp(chassis_type, "11"))  /* 11: Main Server Chassis */
+	if (kstrtoul(chassis_type, 10, &type) != 0)
+		return false;
+
+	switch (type) {
+	case 0x03: /* Desktop */
+	case 0x04: /* Low Profile Desktop */
+	case 0x05: /* Pizza Box */
+	case 0x06: /* Mini Tower */
+	case 0x07: /* Tower */
+	case 0x10: /* Lunch Box */
+	case 0x11: /* Main Server Chassis */
 		return true;
+	}
 
 	return false;
 }
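The rewritten check parses the DMI chassis-type string once and compares numerically, which is what makes two-digit types such as 16 (Lunch Box) matchable at all. Roughly, in userspace terms (strtoul standing in for kstrtoul):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    static bool dmi_is_desktop(const char *chassis_type)
    {
            char *end;
            unsigned long type;

            if (!chassis_type)
                    return false;
            type = strtoul(chassis_type, &end, 10);
            if (end == chassis_type)
                    return false;          /* not a number */

            switch (type) {
            case 0x03: /* Desktop */
            case 0x04: /* Low Profile Desktop */
            case 0x05: /* Pizza Box */
            case 0x06: /* Mini Tower */
            case 0x07: /* Tower */
            case 0x10: /* Lunch Box */
            case 0x11: /* Main Server Chassis */
                    return true;
            }
            return false;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   dmi_is_desktop("3"),    /* 1 */
                   dmi_is_desktop("16"),   /* 1: Lunch Box, numeric-only match */
                   dmi_is_desktop("9"));   /* 0: Laptop */
            return 0;
    }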
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 201c7ce..98b513d 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -202,11 +202,15 @@
 {
 	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
 	const union acpi_object *of_compatible, *obj;
+	acpi_status status;
 	int len, count;
 	int i, nval;
 	char *c;
 
-	acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+	status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
 	/* DT strings are all in lower case */
 	for (c = buf.pointer; *c != '\0'; c++)
 		*c = tolower(*c);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 06cf742..31a0760 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -307,6 +307,13 @@
 		return -EINVAL;
 	}
 
+	if (out_obj->type != ACPI_TYPE_BUFFER) {
+		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
+				dimm_name, cmd_name, out_obj->type);
+		rc = -EINVAL;
+		goto out;
+	}
+
 	if (call_pkg) {
 		call_pkg->nd_fw_size = out_obj->buffer.length;
 		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -325,13 +332,6 @@
 		return 0;
 	}
 
-	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
-		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
-				__func__, dimm_name, cmd_name, out_obj->type);
-		rc = -EINVAL;
-		goto out;
-	}
-
 	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
 		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
 				dimm_name, cmd_name, out_obj->buffer.length);
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index ad0b13a..4a76000 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -443,9 +443,13 @@
 
 	/*
 	 * The spec requires that bit 4 always be 1. If it's not set, assume
-	 * that the implementation doesn't support an SBS charger
+	 * that the implementation doesn't support an SBS charger.
+	 *
+	 * And on some MacBooks a status of 0xffff is always returned, no
+	 * matter whether the charger is plugged in or not, which is also
+	 * wrong, so ignore the SBS charger for those too.
 	 */
-	if (!((status >> 4) & 0x1))
+	if (!((status >> 4) & 0x1) || status == 0xffff)
 		return -ENODEV;
 
 	sbs->charger_present = (status >> 15) & 0x1;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 342c4de..01d5f5d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -668,6 +668,26 @@
 };
 
 /**
+ * struct binder_object - union of flat binder object types
+ * @hdr:   generic object header
+ * @fbo:   binder object (nodes and refs)
+ * @fdo:   file descriptor object
+ * @bbo:   binder buffer pointer
+ * @fdao:  file descriptor array
+ *
+ * Used for type-independent object copies
+ */
+struct binder_object {
+	union {
+		struct binder_object_header hdr;
+		struct flat_binder_object fbo;
+		struct binder_fd_object fdo;
+		struct binder_buffer_object bbo;
+		struct binder_fd_array_object fdao;
+	};
+};
+
+/**
  * binder_proc_lock() - Acquire outer lock for given binder_proc
  * @proc:         struct binder_proc to acquire
  *
@@ -2204,26 +2224,34 @@
 }
 
 /**
- * binder_validate_object() - checks for a valid metadata object in a buffer.
+ * binder_get_object() - gets object and checks for valid metadata
+ * @proc:	binder_proc owning the buffer
  * @buffer:	binder_buffer that we're parsing.
- * @offset:	offset in the buffer at which to validate an object.
+ * @offset:	offset in the @buffer at which to validate an object.
+ * @object:	struct binder_object to read into
  *
  * Return:	If there's a valid metadata object at @offset in @buffer, the
- *		size of that object. Otherwise, it returns zero.
+ *		size of that object. Otherwise, it returns zero. The object
+ *		is read into the struct binder_object pointed to by @object.
  */
-static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
+static size_t binder_get_object(struct binder_proc *proc,
+				struct binder_buffer *buffer,
+				unsigned long offset,
+				struct binder_object *object)
 {
-	/* Check if we can read a header first */
+	size_t read_size;
 	struct binder_object_header *hdr;
 	size_t object_size = 0;
 
-	if (buffer->data_size < sizeof(*hdr) ||
-	    offset > buffer->data_size - sizeof(*hdr) ||
+	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
+	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
 	    !IS_ALIGNED(offset, sizeof(u32)))
 		return 0;
+	binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+				      offset, read_size);
 
-	/* Ok, now see if we can read a complete object. */
-	hdr = (struct binder_object_header *)(buffer->data + offset);
+	/* Ok, now see if we read a complete object. */
+	hdr = &object->hdr;
 	switch (hdr->type) {
 	case BINDER_TYPE_BINDER:
 	case BINDER_TYPE_WEAK_BINDER:
@@ -2252,10 +2280,13 @@
 
 /**
  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
+ * @proc:	binder_proc owning the buffer
  * @b:		binder_buffer containing the object
+ * @object:	struct binder_object to read into
  * @index:	index in offset array at which the binder_buffer_object is
  *		located
- * @start:	points to the start of the offset array
+ * @start_offset: points to the start of the offset array
+ * @object_offsetp: offset of @object read from @b
  * @num_valid:	the number of valid offsets in the offset array
  *
  * Return:	If @index is within the valid range of the offset array
@@ -2266,34 +2297,46 @@
  *		Note that the offset found in index @index itself is not
  *		verified; this function assumes that @num_valid elements
 *		from @start_offset were previously verified to have valid offsets.
+ *		If @object_offsetp is non-NULL, then the offset within
+ *		@b is written to it.
  */
-static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
-							binder_size_t index,
-							binder_size_t *start,
-							binder_size_t num_valid)
+static struct binder_buffer_object *binder_validate_ptr(
+						struct binder_proc *proc,
+						struct binder_buffer *b,
+						struct binder_object *object,
+						binder_size_t index,
+						binder_size_t start_offset,
+						binder_size_t *object_offsetp,
+						binder_size_t num_valid)
 {
-	struct binder_buffer_object *buffer_obj;
-	binder_size_t *offp;
+	size_t object_size;
+	binder_size_t object_offset;
+	unsigned long buffer_offset;
 
 	if (index >= num_valid)
 		return NULL;
 
-	offp = start + index;
-	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
-	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
+	buffer_offset = start_offset + sizeof(binder_size_t) * index;
+	binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+				      b, buffer_offset, sizeof(object_offset));
+	object_size = binder_get_object(proc, b, object_offset, object);
+	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
 		return NULL;
+	if (object_offsetp)
+		*object_offsetp = object_offset;
 
-	return buffer_obj;
+	return &object->bbo;
 }
 
 /**
  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
+ * @proc:		binder_proc owning the buffer
  * @b:			transaction buffer
- * @objects_start	start of objects buffer
- * @buffer:		binder_buffer_object in which to fix up
- * @offset:		start offset in @buffer to fix up
- * @last_obj:		last binder_buffer_object that we fixed up in
- * @last_min_offset:	minimum fixup offset in @last_obj
+ * @objects_start_offset: offset to start of objects buffer
+ * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
+ * @fixup_offset:	start offset in @buffer to fix up
+ * @last_obj_offset:	offset to last binder_buffer_object that we fixed up
+ * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
  *
 * Return:		%true if a fixup at @fixup_offset in the object at
 *			@buffer_obj_offset is allowed.
@@ -2324,63 +2367,83 @@
  *   C (parent = A, offset = 16)
  *     D (parent = B, offset = 0) // B is not A or any of A's parents
  */
-static bool binder_validate_fixup(struct binder_buffer *b,
-				  binder_size_t *objects_start,
-				  struct binder_buffer_object *buffer,
+static bool binder_validate_fixup(struct binder_proc *proc,
+				  struct binder_buffer *b,
+				  binder_size_t objects_start_offset,
+				  binder_size_t buffer_obj_offset,
 				  binder_size_t fixup_offset,
-				  struct binder_buffer_object *last_obj,
+				  binder_size_t last_obj_offset,
 				  binder_size_t last_min_offset)
 {
-	if (!last_obj) {
+	if (!last_obj_offset) {
 		/* Nothing to fix up in */
 		return false;
 	}
 
-	while (last_obj != buffer) {
+	while (last_obj_offset != buffer_obj_offset) {
+		unsigned long buffer_offset;
+		struct binder_object last_object;
+		struct binder_buffer_object *last_bbo;
+		size_t object_size = binder_get_object(proc, b, last_obj_offset,
+						       &last_object);
+		if (object_size != sizeof(*last_bbo))
+			return false;
+
+		last_bbo = &last_object.bbo;
 		/*
 		 * Safe to retrieve the parent of last_obj, since it
 		 * was already previously verified by the driver.
 		 */
-		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
+		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
 			return false;
-		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
-		last_obj = (struct binder_buffer_object *)
-			(b->data + *(objects_start + last_obj->parent));
+		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
+		buffer_offset = objects_start_offset +
+			sizeof(binder_size_t) * last_bbo->parent;
+		binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
+					      b, buffer_offset,
+					      sizeof(last_obj_offset));
 	}
 	return (fixup_offset >= last_min_offset);
 }
 
 static void binder_transaction_buffer_release(struct binder_proc *proc,
 					      struct binder_buffer *buffer,
-					      binder_size_t *failed_at)
+					      binder_size_t failed_at,
+					      bool is_failure)
 {
-	binder_size_t *offp, *off_start, *off_end;
 	int debug_id = buffer->debug_id;
+	binder_size_t off_start_offset, buffer_offset, off_end_offset;
 
 	binder_debug(BINDER_DEBUG_TRANSACTION,
-		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
+		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
 		     proc->pid, buffer->debug_id,
-		     buffer->data_size, buffer->offsets_size, failed_at);
+		     buffer->data_size, buffer->offsets_size,
+		     (unsigned long long)failed_at);
 
 	if (buffer->target_node)
 		binder_dec_node(buffer->target_node, 1, 0);
 
-	off_start = (binder_size_t *)(buffer->data +
-				      ALIGN(buffer->data_size, sizeof(void *)));
-	if (failed_at)
-		off_end = failed_at;
-	else
-		off_end = (void *)off_start + buffer->offsets_size;
-	for (offp = off_start; offp < off_end; offp++) {
+	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
+	off_end_offset = is_failure ? failed_at :
+				off_start_offset + buffer->offsets_size;
+	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
+	     buffer_offset += sizeof(binder_size_t)) {
 		struct binder_object_header *hdr;
-		size_t object_size = binder_validate_object(buffer, *offp);
+		size_t object_size;
+		struct binder_object object;
+		binder_size_t object_offset;
 
+		binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+					      buffer, buffer_offset,
+					      sizeof(object_offset));
+		object_size = binder_get_object(proc, buffer,
+						object_offset, &object);
 		if (object_size == 0) {
 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
-			       debug_id, (u64)*offp, buffer->data_size);
+			       debug_id, (u64)object_offset, buffer->data_size);
 			continue;
 		}
-		hdr = (struct binder_object_header *)(buffer->data + *offp);
+		hdr = &object.hdr;
 		switch (hdr->type) {
 		case BINDER_TYPE_BINDER:
 		case BINDER_TYPE_WEAK_BINDER: {
@@ -2438,28 +2501,25 @@
 		case BINDER_TYPE_FDA: {
 			struct binder_fd_array_object *fda;
 			struct binder_buffer_object *parent;
-			uintptr_t parent_buffer;
-			u32 *fd_array;
+			struct binder_object ptr_object;
+			binder_size_t fda_offset;
 			size_t fd_index;
 			binder_size_t fd_buf_size;
+			binder_size_t num_valid;
 
+			num_valid = (buffer_offset - off_start_offset) /
+						sizeof(binder_size_t);
 			fda = to_binder_fd_array_object(hdr);
-			parent = binder_validate_ptr(buffer, fda->parent,
-						     off_start,
-						     offp - off_start);
+			parent = binder_validate_ptr(proc, buffer, &ptr_object,
+						     fda->parent,
+						     off_start_offset,
+						     NULL,
+						     num_valid);
 			if (!parent) {
 				pr_err("transaction release %d bad parent offset",
 				       debug_id);
 				continue;
 			}
-			/*
-			 * Since the parent was already fixed up, convert it
-			 * back to kernel address space to access it
-			 */
-			parent_buffer = parent->buffer -
-				binder_alloc_get_user_buffer_offset(
-						&proc->alloc);
-
 			fd_buf_size = sizeof(u32) * fda->num_fds;
 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
 				pr_err("transaction release %d invalid number of fds (%lld)\n",
@@ -2473,9 +2533,29 @@
 				       debug_id, (u64)fda->num_fds);
 				continue;
 			}
-			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
-			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
-				task_close_fd(proc, fd_array[fd_index]);
+			/*
+			 * the source data for binder_buffer_object is visible
+			 * to user-space and the @buffer element is the user
+			 * pointer to the buffer_object containing the fd_array.
+			 * Convert the address to an offset relative to
+			 * the base of the transaction buffer.
+			 */
+			fda_offset =
+			    (parent->buffer - (uintptr_t)buffer->user_data) +
+			    fda->parent_offset;
+			for (fd_index = 0; fd_index < fda->num_fds;
+			     fd_index++) {
+				u32 fd;
+				binder_size_t offset = fda_offset +
+					fd_index * sizeof(fd);
+
+				binder_alloc_copy_from_buffer(&proc->alloc,
+							      &fd,
+							      buffer,
+							      offset,
+							      sizeof(fd));
+				task_close_fd(proc, fd);
+			}
 		} break;
 		default:
 			pr_err("transaction release %d bad object type %x\n",
@@ -2672,9 +2752,8 @@
 				     struct binder_transaction *in_reply_to)
 {
 	binder_size_t fdi, fd_buf_size, num_installed_fds;
+	binder_size_t fda_offset;
 	int target_fd;
-	uintptr_t parent_buffer;
-	u32 *fd_array;
 	struct binder_proc *proc = thread->proc;
 	struct binder_proc *target_proc = t->to_proc;
 
@@ -2692,23 +2771,33 @@
 		return -EINVAL;
 	}
 	/*
-	 * Since the parent was already fixed up, convert it
-	 * back to the kernel address space to access it
+	 * the source data for binder_buffer_object is visible
+	 * to user-space and the @buffer element is the user
+	 * pointer to the buffer_object containing the fd_array.
+	 * Convert the address to an offset relative to
+	 * the base of the transaction buffer.
 	 */
-	parent_buffer = parent->buffer -
-		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
-	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
-	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
+	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
+		fda->parent_offset;
+	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
 				  proc->pid, thread->pid);
 		return -EINVAL;
 	}
 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
-		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
-						in_reply_to);
+		u32 fd;
+
+		binder_size_t offset = fda_offset + fdi * sizeof(fd);
+
+		binder_alloc_copy_from_buffer(&target_proc->alloc,
+					      &fd, t->buffer,
+					      offset, sizeof(fd));
+		target_fd = binder_translate_fd(fd, t, thread, in_reply_to);
 		if (target_fd < 0)
 			goto err_translate_fd_failed;
-		fd_array[fdi] = target_fd;
+		binder_alloc_copy_to_buffer(&target_proc->alloc,
+					    t->buffer, offset,
+					    &target_fd, sizeof(fd));
 	}
 	return 0;
 
@@ -2718,38 +2807,48 @@
 	 * installed so far.
 	 */
 	num_installed_fds = fdi;
-	for (fdi = 0; fdi < num_installed_fds; fdi++)
-		task_close_fd(target_proc, fd_array[fdi]);
+	for (fdi = 0; fdi < num_installed_fds; fdi++) {
+		u32 fd;
+		binder_size_t offset = fda_offset + fdi * sizeof(fd);
+		binder_alloc_copy_from_buffer(&target_proc->alloc,
+					      &fd, t->buffer,
+					      offset, sizeof(fd));
+		task_close_fd(target_proc, fd);
+	}
 	return target_fd;
 }
 
 static int binder_fixup_parent(struct binder_transaction *t,
 			       struct binder_thread *thread,
 			       struct binder_buffer_object *bp,
-			       binder_size_t *off_start,
+			       binder_size_t off_start_offset,
 			       binder_size_t num_valid,
-			       struct binder_buffer_object *last_fixup_obj,
+			       binder_size_t last_fixup_obj_off,
 			       binder_size_t last_fixup_min_off)
 {
 	struct binder_buffer_object *parent;
-	u8 *parent_buffer;
 	struct binder_buffer *b = t->buffer;
 	struct binder_proc *proc = thread->proc;
 	struct binder_proc *target_proc = t->to_proc;
+	struct binder_object object;
+	binder_size_t buffer_offset;
+	binder_size_t parent_offset;
 
 	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
 		return 0;
 
-	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
+	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
+				     off_start_offset, &parent_offset,
+				     num_valid);
 	if (!parent) {
 		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
 				  proc->pid, thread->pid);
 		return -EINVAL;
 	}
 
-	if (!binder_validate_fixup(b, off_start,
-				   parent, bp->parent_offset,
-				   last_fixup_obj,
+	if (!binder_validate_fixup(target_proc, b, off_start_offset,
+				   parent_offset, bp->parent_offset,
+				   last_fixup_obj_off,
 				   last_fixup_min_off)) {
 		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
 				  proc->pid, thread->pid);
@@ -2763,10 +2862,10 @@
 				  proc->pid, thread->pid);
 		return -EINVAL;
 	}
-	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
-			binder_alloc_get_user_buffer_offset(
-				&target_proc->alloc));
-	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
+	buffer_offset = bp->parent_offset +
+			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
+	binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
+				    &bp->buffer, sizeof(bp->buffer));
 
 	return 0;
 }
@@ -2891,9 +2990,10 @@
 	int ret;
 	struct binder_transaction *t;
 	struct binder_work *tcomplete;
-	binder_size_t *offp, *off_end, *off_start;
+	binder_size_t buffer_offset = 0;
+	binder_size_t off_start_offset, off_end_offset;
 	binder_size_t off_min;
-	u8 *sg_bufp, *sg_buf_end;
+	binder_size_t sg_buf_offset, sg_buf_end_offset;
 	struct binder_proc *target_proc = NULL;
 	struct binder_thread *target_thread = NULL;
 	struct binder_node *target_node = NULL;
@@ -2902,7 +3002,7 @@
 	uint32_t return_error = 0;
 	uint32_t return_error_param = 0;
 	uint32_t return_error_line = 0;
-	struct binder_buffer_object *last_fixup_obj = NULL;
+	binder_size_t last_fixup_obj_off = 0;
 	binder_size_t last_fixup_min_off = 0;
 	struct binder_context *context = proc->context;
 	int t_debug_id = atomic_inc_return(&binder_last_id);
@@ -3166,11 +3266,11 @@
 				    ALIGN(tr->offsets_size, sizeof(void *)) +
 				    ALIGN(extra_buffers_size, sizeof(void *)) -
 				    ALIGN(secctx_sz, sizeof(u64));
-		char *kptr = t->buffer->data + buf_offset;
 
-		t->security_ctx = (uintptr_t)kptr +
-		    binder_alloc_get_user_buffer_offset(&target_proc->alloc);
-		memcpy(kptr, secctx, secctx_sz);
+		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
+		binder_alloc_copy_to_buffer(&target_proc->alloc,
+					    t->buffer, buf_offset,
+					    secctx, secctx_sz);
 		security_release_secctx(secctx, secctx_sz);
 		secctx = NULL;
 	}
@@ -3178,12 +3278,13 @@
 	t->buffer->transaction = t;
 	t->buffer->target_node = target_node;
 	trace_binder_transaction_alloc_buf(t->buffer);
-	off_start = (binder_size_t *)(t->buffer->data +
-				      ALIGN(tr->data_size, sizeof(void *)));
-	offp = off_start;
 
-	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
-			   tr->data.ptr.buffer, tr->data_size)) {
+	if (binder_alloc_copy_user_to_buffer(
+				&target_proc->alloc,
+				t->buffer, 0,
+				(const void __user *)
+					(uintptr_t)tr->data.ptr.buffer,
+				tr->data_size)) {
 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
 				proc->pid, thread->pid);
 		return_error = BR_FAILED_REPLY;
@@ -3191,8 +3292,13 @@
 		return_error_line = __LINE__;
 		goto err_copy_data_failed;
 	}
-	if (copy_from_user(offp, (const void __user *)(uintptr_t)
-			   tr->data.ptr.offsets, tr->offsets_size)) {
+	if (binder_alloc_copy_user_to_buffer(
+				&target_proc->alloc,
+				t->buffer,
+				ALIGN(tr->data_size, sizeof(void *)),
+				(const void __user *)
+					(uintptr_t)tr->data.ptr.offsets,
+				tr->offsets_size)) {
 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
 				proc->pid, thread->pid);
 		return_error = BR_FAILED_REPLY;
@@ -3217,17 +3323,30 @@
 		return_error_line = __LINE__;
 		goto err_bad_offset;
 	}
-	off_end = (void *)off_start + tr->offsets_size;
-	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
-	sg_buf_end = sg_bufp + extra_buffers_size;
+	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
+	buffer_offset = off_start_offset;
+	off_end_offset = off_start_offset + tr->offsets_size;
+	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
+	sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
 	off_min = 0;
-	for (; offp < off_end; offp++) {
+	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
+	     buffer_offset += sizeof(binder_size_t)) {
 		struct binder_object_header *hdr;
-		size_t object_size = binder_validate_object(t->buffer, *offp);
+		size_t object_size;
+		struct binder_object object;
+		binder_size_t object_offset;
 
-		if (object_size == 0 || *offp < off_min) {
+		binder_alloc_copy_from_buffer(&target_proc->alloc,
+					      &object_offset,
+					      t->buffer,
+					      buffer_offset,
+					      sizeof(object_offset));
+		object_size = binder_get_object(target_proc, t->buffer,
+						object_offset, &object);
+		if (object_size == 0 || object_offset < off_min) {
 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
-					  proc->pid, thread->pid, (u64)*offp,
+					  proc->pid, thread->pid,
+					  (u64)object_offset,
 					  (u64)off_min,
 					  (u64)t->buffer->data_size);
 			return_error = BR_FAILED_REPLY;
@@ -3236,8 +3355,8 @@
 			goto err_bad_offset;
 		}
 
-		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
-		off_min = *offp + object_size;
+		hdr = &object.hdr;
+		off_min = object_offset + object_size;
 		switch (hdr->type) {
 		case BINDER_TYPE_BINDER:
 		case BINDER_TYPE_WEAK_BINDER: {
@@ -3251,6 +3370,9 @@
 				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
+			binder_alloc_copy_to_buffer(&target_proc->alloc,
+						    t->buffer, object_offset,
+						    fp, sizeof(*fp));
 		} break;
 		case BINDER_TYPE_HANDLE:
 		case BINDER_TYPE_WEAK_HANDLE: {
@@ -3264,6 +3386,9 @@
 				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
+			binder_alloc_copy_to_buffer(&target_proc->alloc,
+						    t->buffer, object_offset,
+						    fp, sizeof(*fp));
 		} break;
 
 		case BINDER_TYPE_FD: {
@@ -3279,14 +3404,23 @@
 			}
 			fp->pad_binder = 0;
 			fp->fd = target_fd;
+			binder_alloc_copy_to_buffer(&target_proc->alloc,
+						    t->buffer, object_offset,
+						    fp, sizeof(*fp));
 		} break;
 		case BINDER_TYPE_FDA: {
+			struct binder_object ptr_object;
+			binder_size_t parent_offset;
 			struct binder_fd_array_object *fda =
 				to_binder_fd_array_object(hdr);
+			size_t num_valid = (buffer_offset - off_start_offset) /
+						sizeof(binder_size_t);
 			struct binder_buffer_object *parent =
-				binder_validate_ptr(t->buffer, fda->parent,
-						    off_start,
-						    offp - off_start);
+				binder_validate_ptr(target_proc, t->buffer,
+						    &ptr_object, fda->parent,
+						    off_start_offset,
+						    &parent_offset,
+						    num_valid);
 			if (!parent) {
 				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
 						  proc->pid, thread->pid);
@@ -3295,9 +3429,11 @@
 				return_error_line = __LINE__;
 				goto err_bad_parent;
 			}
-			if (!binder_validate_fixup(t->buffer, off_start,
-						   parent, fda->parent_offset,
-						   last_fixup_obj,
+			if (!binder_validate_fixup(target_proc, t->buffer,
+						   off_start_offset,
+						   parent_offset,
+						   fda->parent_offset,
+						   last_fixup_obj_off,
 						   last_fixup_min_off)) {
 				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
 						  proc->pid, thread->pid);
@@ -3314,14 +3450,15 @@
 				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
-			last_fixup_obj = parent;
+			last_fixup_obj_off = parent_offset;
 			last_fixup_min_off =
 				fda->parent_offset + sizeof(u32) * fda->num_fds;
 		} break;
 		case BINDER_TYPE_PTR: {
 			struct binder_buffer_object *bp =
 				to_binder_buffer_object(hdr);
-			size_t buf_left = sg_buf_end - sg_bufp;
+			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
+			size_t num_valid;
 
 			if (bp->length > buf_left) {
 				binder_user_error("%d:%d got transaction with too large buffer\n",
@@ -3331,9 +3468,13 @@
 				return_error_line = __LINE__;
 				goto err_bad_offset;
 			}
-			if (copy_from_user(sg_bufp,
-					   (const void __user *)(uintptr_t)
-					   bp->buffer, bp->length)) {
+			if (binder_alloc_copy_user_to_buffer(
+						&target_proc->alloc,
+						t->buffer,
+						sg_buf_offset,
+						(const void __user *)
+							(uintptr_t)bp->buffer,
+						bp->length)) {
 				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
 						  proc->pid, thread->pid);
 				return_error_param = -EFAULT;
@@ -3342,14 +3483,16 @@
 				goto err_copy_data_failed;
 			}
 			/* Fixup buffer pointer to target proc address space */
-			bp->buffer = (uintptr_t)sg_bufp +
-				binder_alloc_get_user_buffer_offset(
-						&target_proc->alloc);
-			sg_bufp += ALIGN(bp->length, sizeof(u64));
+			bp->buffer = (uintptr_t)
+				t->buffer->user_data + sg_buf_offset;
+			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
 
-			ret = binder_fixup_parent(t, thread, bp, off_start,
-						  offp - off_start,
-						  last_fixup_obj,
+			num_valid = (buffer_offset - off_start_offset) /
+					sizeof(binder_size_t);
+			ret = binder_fixup_parent(t, thread, bp,
+						  off_start_offset,
+						  num_valid,
+						  last_fixup_obj_off,
 						  last_fixup_min_off);
 			if (ret < 0) {
 				return_error = BR_FAILED_REPLY;
@@ -3357,7 +3500,10 @@
 				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
-			last_fixup_obj = bp;
+			binder_alloc_copy_to_buffer(&target_proc->alloc,
+						    t->buffer, object_offset,
+						    bp, sizeof(*bp));
+			last_fixup_obj_off = object_offset;
 			last_fixup_min_off = 0;
 		} break;
 		default:
@@ -3437,7 +3583,8 @@
 err_bad_parent:
 err_copy_data_failed:
 	trace_binder_transaction_failed_buffer_release(t->buffer);
-	binder_transaction_buffer_release(target_proc, t->buffer, offp);
+	binder_transaction_buffer_release(target_proc, t->buffer,
+					  buffer_offset, true);
 	if (target_node)
 		binder_dec_node_tmpref(target_node);
 	target_node = NULL;
@@ -3716,7 +3863,7 @@
 				binder_node_inner_unlock(buf_node);
 			}
 			trace_binder_transaction_buffer_release(buffer);
-			binder_transaction_buffer_release(proc, buffer, NULL);
+			binder_transaction_buffer_release(proc, buffer, 0, false);
 			binder_alloc_free_buf(&proc->alloc, buffer);
 			break;
 		}
@@ -4324,9 +4471,7 @@
 
 		trd->data_size = t->buffer->data_size;
 		trd->offsets_size = t->buffer->offsets_size;
-		trd->data.ptr.buffer = (binder_uintptr_t)
-			((uintptr_t)t->buffer->data +
-			binder_alloc_get_user_buffer_offset(&proc->alloc));
+		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
 		trd->data.ptr.offsets = trd->data.ptr.buffer +
 					ALIGN(t->buffer->data_size,
 					    sizeof(void *));
@@ -5384,7 +5529,7 @@
 		seq_printf(m, " node %d", buffer->target_node->debug_id);
 	seq_printf(m, " size %zd:%zd data %pK\n",
 		   buffer->data_size, buffer->offsets_size,
-		   buffer->data);
+		   buffer->user_data);
 }
 
 static void print_binder_work_ilocked(struct seq_file *m,
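The net effect of the binder.c changes above: transaction buffers are no
longer dereferenced through a kernel alias of the user mapping; every access
goes through a bounds-checked copy helper keyed by a buffer offset. Below is
a minimal userspace model of that pattern, for illustration only; the
buf_copy_from() helper is a hypothetical stand-in for
binder_alloc_copy_from_buffer().

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct buf {
    	uint8_t data[64];
    	size_t  data_size;
    };

    /* bounds-checked read at an offset, standing in for
     * binder_alloc_copy_from_buffer() */
    static int buf_copy_from(const struct buf *b, void *dest,
    			 size_t off, size_t n)
    {
    	if (off > b->data_size || n > b->data_size - off)
    		return -1;	/* reject out-of-range access */
    	memcpy(dest, b->data + off, n);
    	return 0;
    }

    int main(void)
    {
    	struct buf b = { .data_size = sizeof(b.data) };
    	uint64_t object_offset = 8;

    	/* stash an offsets-array entry at byte 16 of the buffer */
    	memcpy(b.data + 16, &object_offset, sizeof(object_offset));
    	object_offset = 0;

    	/* read it back through the checked helper, never via a raw
    	 * pointer into the mapping */
    	if (buf_copy_from(&b, &object_offset, 16, sizeof(object_offset)))
    		return 1;
    	printf("object at offset %llu\n",
    	       (unsigned long long)object_offset);
    	return 0;
    }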
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 38bfa7a..34454b0 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -28,6 +28,8 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/list_lru.h>
+#include <linux/uaccess.h>
+#include <linux/highmem.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
@@ -65,9 +67,8 @@
 				       struct binder_buffer *buffer)
 {
 	if (list_is_last(&buffer->entry, &alloc->buffers))
-		return (u8 *)alloc->buffer +
-			alloc->buffer_size - (u8 *)buffer->data;
-	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+		return alloc->buffer + alloc->buffer_size - buffer->user_data;
+	return binder_buffer_next(buffer)->user_data - buffer->user_data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -117,9 +118,9 @@
 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (new_buffer->data < buffer->data)
+		if (new_buffer->user_data < buffer->user_data)
 			p = &parent->rb_left;
-		else if (new_buffer->data > buffer->data)
+		else if (new_buffer->user_data > buffer->user_data)
 			p = &parent->rb_right;
 		else
 			BUG();
@@ -134,17 +135,17 @@
 {
 	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
-	void *kern_ptr;
+	void __user *uptr;
 
-	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+	uptr = (void __user *)user_ptr;
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (kern_ptr < buffer->data)
+		if (uptr < buffer->user_data)
 			n = n->rb_left;
-		else if (kern_ptr > buffer->data)
+		else if (uptr > buffer->user_data)
 			n = n->rb_right;
 		else {
 			/*
@@ -184,9 +185,9 @@
 }
 
 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
-				    void *start, void *end)
+				    void __user *start, void __user *end)
 {
-	void *page_addr;
+	void __user *page_addr;
 	unsigned long user_page_addr;
 	struct binder_lru_page *page;
 	struct vm_area_struct *vma = NULL;
@@ -260,18 +261,7 @@
 		page->alloc = alloc;
 		INIT_LIST_HEAD(&page->lru);
 
-		ret = map_kernel_range_noflush((unsigned long)page_addr,
-					       PAGE_SIZE, PAGE_KERNEL,
-					       &page->page_ptr);
-		flush_cache_vmap((unsigned long)page_addr,
-				(unsigned long)page_addr + PAGE_SIZE);
-		if (ret != 1) {
-			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
-			       alloc->pid, page_addr);
-			goto err_map_kernel_failed;
-		}
-		user_page_addr =
-			(uintptr_t)page_addr + alloc->user_buffer_offset;
+		user_page_addr = (uintptr_t)page_addr;
 		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
@@ -309,8 +299,6 @@
 		continue;
 
 err_vm_insert_page_failed:
-		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
 		__free_page(page->page_ptr);
 		page->page_ptr = NULL;
 err_alloc_page_failed:
@@ -336,8 +324,8 @@
 	struct binder_buffer *buffer;
 	size_t buffer_size;
 	struct rb_node *best_fit = NULL;
-	void *has_page_addr;
-	void *end_page_addr;
+	void __user *has_page_addr;
+	void __user *end_page_addr;
 	size_t size, data_offsets_size;
 	int ret;
 
@@ -431,15 +419,15 @@
 		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
 		      alloc->pid, size, buffer, buffer_size);
 
-	has_page_addr =
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+	has_page_addr = (void __user *)
+		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
 	WARN_ON(n && buffer_size != size);
 	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
-	ret = binder_update_page_range(alloc, 1,
-	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
+	ret = binder_update_page_range(alloc, 1, (void __user *)
+		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -452,7 +440,7 @@
 			       __func__, alloc->pid);
 			goto err_alloc_buf_struct_failed;
 		}
-		new_buffer->data = (u8 *)buffer->data + size;
+		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
 		list_add(&new_buffer->entry, &buffer->entry);
 		new_buffer->free = 1;
 		binder_insert_free_buffer(alloc, new_buffer);
@@ -478,8 +466,8 @@
 	return buffer;
 
 err_alloc_buf_struct_failed:
-	binder_update_page_range(alloc, 0,
-				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+	binder_update_page_range(alloc, 0, (void __user *)
+				 PAGE_ALIGN((uintptr_t)buffer->user_data),
 				 end_page_addr);
 	return ERR_PTR(-ENOMEM);
 }
@@ -514,14 +502,15 @@
 	return buffer;
 }
 
-static void *buffer_start_page(struct binder_buffer *buffer)
+static void __user *buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
 }
 
-static void *prev_buffer_end_page(struct binder_buffer *buffer)
+static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+	return (void __user *)
+		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
@@ -536,7 +525,8 @@
 		to_free = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %pK share page with %pK\n",
-				   alloc->pid, buffer->data, prev->data);
+				   alloc->pid, buffer->user_data,
+				   prev->user_data);
 	}
 
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
@@ -546,23 +536,24 @@
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 					   "%d: merge free, buffer %pK share page with %pK\n",
 					   alloc->pid,
-					   buffer->data,
-					   next->data);
+					   buffer->user_data,
+					   next->user_data);
 		}
 	}
 
-	if (PAGE_ALIGNED(buffer->data)) {
+	if (PAGE_ALIGNED(buffer->user_data)) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer start %pK is page aligned\n",
-				   alloc->pid, buffer->data);
+				   alloc->pid, buffer->user_data);
 		to_free = false;
 	}
 
 	if (to_free) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
-				   alloc->pid, buffer->data,
-				   prev->data, next ? next->data : NULL);
+				   alloc->pid, buffer->user_data,
+				   prev->user_data,
+				   next ? next->user_data : NULL);
 		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
 					 buffer_start_page(buffer) + PAGE_SIZE);
 	}
@@ -588,8 +579,8 @@
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON(buffer->data < alloc->buffer);
-	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+	BUG_ON(buffer->user_data < alloc->buffer);
+	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
 		alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -600,8 +591,9 @@
 	}
 
 	binder_update_page_range(alloc, 0,
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
+		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
+		(void __user *)(((uintptr_t)
+			  buffer->user_data + buffer_size) & PAGE_MASK));
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
@@ -657,7 +649,6 @@
 			      struct vm_area_struct *vma)
 {
 	int ret;
-	struct vm_struct *area;
 	const char *failure_string;
 	struct binder_buffer *buffer;
 
@@ -668,28 +659,9 @@
 		goto err_already_mapped;
 	}
 
-	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
-	if (area == NULL) {
-		ret = -ENOMEM;
-		failure_string = "get_vm_area";
-		goto err_get_vm_area_failed;
-	}
-	alloc->buffer = area->addr;
-	alloc->user_buffer_offset =
-		vma->vm_start - (uintptr_t)alloc->buffer;
+	alloc->buffer = (void __user *)vma->vm_start;
 	mutex_unlock(&binder_alloc_mmap_lock);
 
-#ifdef CONFIG_CPU_CACHE_VIPT
-	if (cache_is_vipt_aliasing()) {
-		while (CACHE_COLOUR(
-				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
-			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
-				__func__, alloc->pid, vma->vm_start,
-				vma->vm_end, alloc->buffer);
-			vma->vm_start += PAGE_SIZE;
-		}
-	}
-#endif
 	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
 				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
 			       GFP_KERNEL);
@@ -707,7 +679,7 @@
 		goto err_alloc_buf_struct_failed;
 	}
 
-	buffer->data = alloc->buffer;
+	buffer->user_data = alloc->buffer;
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
@@ -725,9 +697,7 @@
 	alloc->pages = NULL;
 err_alloc_pages_failed:
 	mutex_lock(&binder_alloc_mmap_lock);
-	vfree(alloc->buffer);
 	alloc->buffer = NULL;
-err_get_vm_area_failed:
 err_already_mapped:
 	mutex_unlock(&binder_alloc_mmap_lock);
 	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
@@ -771,7 +741,7 @@
 		int i;
 
 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-			void *page_addr;
+			void __user *page_addr;
 			bool on_lru;
 
 			if (!alloc->pages[i].page_ptr)
@@ -784,12 +754,10 @@
 				     "%s: %d: page %d at %pK %s\n",
 				     __func__, alloc->pid, i, page_addr,
 				     on_lru ? "on lru" : "active");
-			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 			__free_page(alloc->pages[i].page_ptr);
 			page_count++;
 		}
 		kfree(alloc->pages);
-		vfree(alloc->buffer);
 	}
 	mutex_unlock(&alloc->mutex);
 	if (alloc->vma_vm_mm)
@@ -804,7 +772,7 @@
 				struct binder_buffer *buffer)
 {
 	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
-		   prefix, buffer->debug_id, buffer->data,
+		   prefix, buffer->debug_id, buffer->user_data,
 		   buffer->data_size, buffer->offsets_size,
 		   buffer->extra_buffers_size,
 		   buffer->transaction ? "active" : "delivered");
@@ -938,10 +906,7 @@
 	if (vma) {
 		trace_binder_unmap_user_start(alloc, index);
 
-		zap_page_range(vma,
-			       page_addr +
-			       alloc->user_buffer_offset,
-			       PAGE_SIZE, NULL);
+		zap_page_range(vma, page_addr, PAGE_SIZE, NULL);
 
 		trace_binder_unmap_user_end(alloc, index);
 
@@ -951,7 +916,6 @@
 
 	trace_binder_unmap_kernel_start(alloc, index);
 
-	unmap_kernel_range(page_addr, PAGE_SIZE);
 	__free_page(page->page_ptr);
 	page->page_ptr = NULL;
 
@@ -1018,3 +982,173 @@
 	}
 	return ret;
 }
+
+/**
+ * check_buffer() - verify that buffer/offset is safe to access
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @offset: offset into @buffer data
+ * @bytes: bytes to access from offset
+ *
+ * Check that the @offset/@bytes are within the size of the given
+ * @buffer and that the buffer is currently active and not freeable.
+ * Offsets must also be multiples of sizeof(u32). The kernel is
+ * allowed to touch the buffer in two cases:
+ *
+ * 1) when the buffer is being created:
+ *     (buffer->free == 0 && buffer->allow_user_free == 0)
+ * 2) when the buffer is being torn down:
+ *     (buffer->free == 0 && buffer->transaction == NULL).
+ *
+ * Return: true if the buffer is safe to access
+ */
+static inline bool check_buffer(struct binder_alloc *alloc,
+				struct binder_buffer *buffer,
+				binder_size_t offset, size_t bytes)
+{
+	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+	return buffer_size >= bytes &&
+		offset <= buffer_size - bytes &&
+		IS_ALIGNED(offset, sizeof(u32)) &&
+		!buffer->free &&
+		(!buffer->allow_user_free || !buffer->transaction);
+}
+
+/**
+ * binder_alloc_get_page() - get kernel pointer for given buffer offset
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @pgoffp: address to copy final page offset to
+ *
+ * Lookup the struct page corresponding to the address
+ * at @buffer_offset into @buffer->user_data. If @pgoffp is not
+ * NULL, the byte-offset into the page is written there.
+ *
+ * The caller is responsible for ensuring that the offset points
+ * to a valid address within the @buffer and that @buffer is
+ * not freeable by the user. Since it can't be freed, we are
+ * guaranteed that the corresponding elements of @alloc->pages[]
+ * cannot change.
+ *
+ * Return: struct page
+ */
+static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
+					  struct binder_buffer *buffer,
+					  binder_size_t buffer_offset,
+					  pgoff_t *pgoffp)
+{
+	binder_size_t buffer_space_offset = buffer_offset +
+		(buffer->user_data - alloc->buffer);
+	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
+	size_t index = buffer_space_offset >> PAGE_SHIFT;
+	struct binder_lru_page *lru_page;
+
+	lru_page = &alloc->pages[index];
+	*pgoffp = pgoff;
+	return lru_page->page_ptr;
+}
+
+/**
+ * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @from: userspace pointer to source buffer
+ * @bytes: bytes to copy
+ *
+ * Copy bytes from source userspace to target buffer.
+ *
+ * Return: bytes remaining to be copied
+ */
+unsigned long
+binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 const void __user *from,
+				 size_t bytes)
+{
+	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
+		return bytes;
+
+	while (bytes) {
+		unsigned long size;
+		unsigned long ret;
+		struct page *page;
+		pgoff_t pgoff;
+		void *kptr;
+
+		page = binder_alloc_get_page(alloc, buffer,
+					     buffer_offset, &pgoff);
+		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+		kptr = kmap(page) + pgoff;
+		ret = copy_from_user(kptr, from, size);
+		kunmap(page);
+		if (ret)
+			return bytes - size + ret;
+		bytes -= size;
+		from += size;
+		buffer_offset += size;
+	}
+	return 0;
+}
+
+static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+					bool to_buffer,
+					struct binder_buffer *buffer,
+					binder_size_t buffer_offset,
+					void *ptr,
+					size_t bytes)
+{
+	/* All copies must be 32-bit aligned and 32-bit size */
+	BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
+
+	while (bytes) {
+		unsigned long size;
+		struct page *page;
+		pgoff_t pgoff;
+		void *tmpptr;
+		void *base_ptr;
+
+		page = binder_alloc_get_page(alloc, buffer,
+					     buffer_offset, &pgoff);
+		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+		base_ptr = kmap_atomic(page);
+		tmpptr = base_ptr + pgoff;
+		if (to_buffer)
+			memcpy(tmpptr, ptr, size);
+		else
+			memcpy(ptr, tmpptr, size);
+		/*
+		 * kunmap_atomic() takes care of flushing the cache
+		 * if this device has VIVT cache arch
+		 */
+		kunmap_atomic(base_ptr);
+		bytes -= size;
+		pgoff = 0;
+		ptr = ptr + size;
+		buffer_offset += size;
+	}
+}
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 void *src,
+				 size_t bytes)
+{
+	binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
+				    src, bytes);
+}
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+				   void *dest,
+				   struct binder_buffer *buffer,
+				   binder_size_t buffer_offset,
+				   size_t bytes)
+{
+	binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
+				    dest, bytes);
+}
+
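binder_alloc_get_page() above reduces a buffer offset to an index into
alloc->pages[] plus a byte offset within that page. A runnable model of the
arithmetic, assuming the common 4 KiB page size:

    #include <stdint.h>
    #include <stdio.h>

    #define MY_PAGE_SHIFT 12
    #define MY_PAGE_SIZE  (1UL << MY_PAGE_SHIFT)
    #define MY_PAGE_MASK  (~(MY_PAGE_SIZE - 1))

    int main(void)
    {
    	/* buffer_offset plus the buffer's offset inside the mapping */
    	uint64_t buffer_space_offset = 5000;
    	uint64_t pgoff = buffer_space_offset & ~MY_PAGE_MASK;
    	uint64_t index = buffer_space_offset >> MY_PAGE_SHIFT;

    	/* 5000 bytes = 1 full 4096-byte page + 904 bytes into the next */
    	printf("pages[%llu], byte %llu within the page\n",
    	       (unsigned long long)index, (unsigned long long)pgoff);
    	return 0;
    }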
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index fb3238c..b60d161 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -22,6 +22,7 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/list_lru.h>
+#include <uapi/linux/android/binder.h>
 
 extern struct list_lru binder_alloc_lru;
 struct binder_transaction;
@@ -30,16 +31,16 @@
  * struct binder_buffer - buffer used for binder transactions
  * @entry:              entry alloc->buffers
  * @rb_node:            node for allocated_buffers/free_buffers rb trees
- * @free:               true if buffer is free
- * @allow_user_free:    describe the second member of struct blah,
- * @async_transaction:  describe the second member of struct blah,
- * @debug_id:           describe the second member of struct blah,
- * @transaction:        describe the second member of struct blah,
- * @target_node:        describe the second member of struct blah,
- * @data_size:          describe the second member of struct blah,
- * @offsets_size:       describe the second member of struct blah,
- * @extra_buffers_size: describe the second member of struct blah,
- * @data:i              describe the second member of struct blah,
+ * @free:               %true if buffer is free
+ * @allow_user_free:    %true if user is allowed to free buffer
+ * @async_transaction:  %true if buffer is in use for an async txn
+ * @debug_id:           unique ID for debugging
+ * @transaction:        pointer to associated struct binder_transaction
+ * @target_node:        struct binder_node associated with this buffer
+ * @data_size:          size of @transaction data
+ * @offsets_size:       size of array of offsets
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @user_data:          user pointer to base of buffer space
  *
  * Bookkeeping structure for binder transaction buffers
  */
@@ -58,7 +59,7 @@
 	size_t data_size;
 	size_t offsets_size;
 	size_t extra_buffers_size;
-	void *data;
+	void __user *user_data;
 };
 
 /**
@@ -81,7 +82,6 @@
  *                      (invariant after init)
  * @vma_vm_mm:          copy of vma->vm_mm (invarient after mmap)
  * @buffer:             base of per-proc address space mapped via mmap
- * @user_buffer_offset: offset between user and kernel VAs for buffer
  * @buffers:            list of all buffers for this proc
  * @free_buffers:       rb tree of buffers available for allocation
  *                      sorted by size
@@ -102,8 +102,7 @@
 	struct mutex mutex;
 	struct vm_area_struct *vma;
 	struct mm_struct *vma_vm_mm;
-	void *buffer;
-	ptrdiff_t user_buffer_offset;
+	void __user *buffer;
 	struct list_head buffers;
 	struct rb_root free_buffers;
 	struct rb_root allocated_buffers;
@@ -162,26 +161,24 @@
 	return free_async_space;
 }
 
-/**
- * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
- * @alloc:	binder_alloc for this proc
- *
- * Return:	the offset between kernel and user-space addresses to use for
- * virtual address conversion
- */
-static inline ptrdiff_t
-binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
-{
-	/*
-	 * user_buffer_offset is constant if vma is set and
-	 * undefined if vma is not set. It is possible to
-	 * get here with !alloc->vma if the target process
-	 * is dying while a transaction is being initiated.
-	 * Returning the old value is ok in this case and
-	 * the transaction will fail.
-	 */
-	return alloc->user_buffer_offset;
-}
+unsigned long
+binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 const void __user *from,
+				 size_t bytes);
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 void *src,
+				 size_t bytes);
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+				   void *dest,
+				   struct binder_buffer *buffer,
+				   binder_size_t buffer_offset,
+				   size_t bytes);
 
 #endif /* _LINUX_BINDER_ALLOC_H */
 
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 8bd7bce..b727089 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -102,11 +102,12 @@
 					 struct binder_buffer *buffer,
 					 size_t size)
 {
-	void *page_addr, *end;
+	void __user *page_addr;
+	void __user *end;
 	int page_index;
 
-	end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
-	page_addr = buffer->data;
+	end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+	page_addr = buffer->user_data;
 	for (; page_addr < end; page_addr += PAGE_SIZE) {
 		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		if (!alloc->pages[page_index].page_ptr ||
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index b11dffc..7674231 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -296,7 +296,7 @@
 
 TRACE_EVENT(binder_update_page_range,
 	TP_PROTO(struct binder_alloc *alloc, bool allocate,
-		 void *start, void *end),
+		 void __user *start, void __user *end),
 	TP_ARGS(alloc, allocate, start, end),
 	TP_STRUCT__entry(
 		__field(int, proc)
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 0ad96c6..7017a81 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -51,38 +51,52 @@
 /* Per the spec, only slot type and drawer type ODD can be supported */
 static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 {
-	char buf[16];
+	char *buf;
 	unsigned int ret;
-	struct rm_feature_desc *desc = (void *)(buf + 8);
+	struct rm_feature_desc *desc;
 	struct ata_taskfile tf;
 	static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
 			2,      /* only 1 feature descriptor requested */
 			0, 3,   /* 3, removable medium feature */
 			0, 0, 0,/* reserved */
-			0, sizeof(buf),
+			0, 16,
 			0, 0, 0,
 	};
 
+	buf = kzalloc(16, GFP_KERNEL);
+	if (!buf)
+		return ODD_MECH_TYPE_UNSUPPORTED;
+	desc = (void *)(buf + 8);
+
 	ata_tf_init(dev, &tf);
 	tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf.command = ATA_CMD_PACKET;
 	tf.protocol = ATAPI_PROT_PIO;
-	tf.lbam = sizeof(buf);
+	tf.lbam = 16;
 
 	ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
-				buf, sizeof(buf), 0);
-	if (ret)
+				buf, 16, 0);
+	if (ret) {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 
-	if (be16_to_cpu(desc->feature_code) != 3)
+	if (be16_to_cpu(desc->feature_code) != 3) {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 
-	if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+	if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+		kfree(buf);
 		return ODD_MECH_TYPE_SLOT;
-	else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+	} else if (desc->mech_type == 1 && desc->load == 0 &&
+		   desc->eject == 1) {
+		kfree(buf);
 		return ODD_MECH_TYPE_DRAWER;
-	else
+	} else {
+		kfree(buf);
 		return ODD_MECH_TYPE_UNSUPPORTED;
+	}
 }
 
 /* Test if ODD is zero power ready by sense code */
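The libata-zpodd change moves the GET CONFIGURATION buffer off the stack
(stack buffers are not safe for DMA) and therefore has to free it on every
return path. For comparison, a single-exit shape with the same logic would
avoid the repeated kfree() calls; this is only a sketch of an alternative,
not what the patch commits:

    static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
    {
    	enum odd_mech_type mech = ODD_MECH_TYPE_UNSUPPORTED;
    	struct rm_feature_desc *desc;
    	struct ata_taskfile tf;
    	unsigned int ret;
    	char *buf;
    	static const char cdb[] = { GPCMD_GET_CONFIGURATION,
    			2,      /* only 1 feature descriptor requested */
    			0, 3,   /* 3, removable medium feature */
    			0, 0, 0,/* reserved */
    			0, 16,
    			0, 0, 0,
    	};

    	buf = kzalloc(16, GFP_KERNEL);
    	if (!buf)
    		return ODD_MECH_TYPE_UNSUPPORTED;
    	desc = (void *)(buf + 8);

    	ata_tf_init(dev, &tf);
    	tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
    	tf.command = ATA_CMD_PACKET;
    	tf.protocol = ATAPI_PROT_PIO;
    	tf.lbam = 16;

    	ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, buf, 16, 0);
    	if (!ret && be16_to_cpu(desc->feature_code) == 3) {
    		if (desc->mech_type == 0 && desc->load == 0 &&
    		    desc->eject == 1)
    			mech = ODD_MECH_TYPE_SLOT;
    		else if (desc->mech_type == 1 && desc->load == 0 &&
    			 desc->eject == 1)
    			mech = ODD_MECH_TYPE_DRAWER;
    	}

    	kfree(buf);		/* single cleanup path */
    	return mech;
    }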
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 7ff7ae0..f000f12 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -622,11 +622,18 @@
 	return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_mds(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
 static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
@@ -634,6 +641,7 @@
 	&dev_attr_spectre_v2.attr,
 	&dev_attr_spec_store_bypass.attr,
 	&dev_attr_l1tf.attr,
+	&dev_attr_mds.attr,
 	NULL
 };
 
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index b758633..39676b3 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -116,7 +116,6 @@
 	if (!ws)
 		return;
 
-	del_timer_sync(&ws->timer);
 	__pm_relax(ws);
 }
 EXPORT_SYMBOL_GPL(wakeup_source_drop);
@@ -204,6 +203,13 @@
 	list_del_rcu(&ws->entry);
 	spin_unlock_irqrestore(&events_lock, flags);
 	synchronize_srcu(&wakeup_srcu);
+
+	del_timer_sync(&ws->timer);
+	/*
+	 * Clear timer.function to make wakeup_source_not_registered() treat
+	 * this wakeup source as not registered.
+	 */
+	ws->timer.function = NULL;
 }
 EXPORT_SYMBOL_GPL(wakeup_source_remove);
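Clearing ws->timer.function only helps if something tests it afterwards;
presumably wakeup_source_not_registered() keys off the installed handler.
A sketch of how such a check would read in drivers/base/power/wakeup.c; the
exact body is an assumption, not taken from this patch:

    /* sketch: a registered source has pm_wakeup_timer_fn installed by
     * wakeup_source_add(), so a NULL (or foreign) handler means the
     * source is gone or was never registered */
    static bool wakeup_source_not_registered(struct wakeup_source *ws)
    {
    	return !ws || ws->timer.function != pm_wakeup_timer_fn;
    }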
 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 326b9ba..6914c6e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3752,7 +3752,7 @@
 
 	if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
 		if (lock_fdc(drive))
-			return -EINTR;
+			return 0;
 		poll_drive(false, 0);
 		process_fd_request();
 	}
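The floppy fix works because ->check_events() does not return an errno: its
unsigned return value is a mask of DISK_EVENT_* bits, so -EINTR would have
been reinterpreted as a mask full of spurious events. The relevant contract,
from struct block_device_operations:

    /* a failure must be reported as 0 (no events), never as a negative
     * errno, because the caller treats the value as a bitmask */
    unsigned int (*check_events)(struct gendisk *disk, unsigned int clearing);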
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d7d2494..b3e432a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -82,7 +82,6 @@
 
 static DEFINE_IDR(loop_index_idr);
 static DEFINE_MUTEX(loop_index_mutex);
-static DEFINE_MUTEX(loop_ctl_mutex);
 
 static int max_part;
 static int part_shift;
@@ -1034,7 +1033,7 @@
 	 */
 	if (atomic_read(&lo->lo_refcnt) > 1) {
 		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
-		mutex_unlock(&loop_ctl_mutex);
+		mutex_unlock(&lo->lo_ctl_mutex);
 		return 0;
 	}
 
@@ -1084,12 +1083,12 @@
 	if (!part_shift)
 		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
 	loop_unprepare_queue(lo);
-	mutex_unlock(&loop_ctl_mutex);
+	mutex_unlock(&lo->lo_ctl_mutex);
 	/*
-	 * Need not hold loop_ctl_mutex to fput backing file.
-	 * Calling fput holding loop_ctl_mutex triggers a circular
+	 * Need not hold lo_ctl_mutex to fput backing file.
+	 * Calling fput holding lo_ctl_mutex triggers a circular
 	 * lock dependency possibility warning as fput can take
-	 * bd_mutex which is usually taken before loop_ctl_mutex.
+	 * bd_mutex which is usually taken before lo_ctl_mutex.
 	 */
 	fput(filp);
 	return 0;
@@ -1402,7 +1401,7 @@
 	struct loop_device *lo = bdev->bd_disk->private_data;
 	int err;
 
-	mutex_lock_nested(&loop_ctl_mutex, 1);
+	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
 	switch (cmd) {
 	case LOOP_SET_FD:
 		err = loop_set_fd(lo, mode, bdev, arg);
@@ -1411,7 +1410,7 @@
 		err = loop_change_fd(lo, bdev, arg);
 		break;
 	case LOOP_CLR_FD:
-		/* loop_clr_fd would have unlocked loop_ctl_mutex on success */
+		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
 		err = loop_clr_fd(lo);
 		if (!err)
 			goto out_unlocked;
@@ -1452,7 +1451,7 @@
 	default:
 		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
 	}
-	mutex_unlock(&loop_ctl_mutex);
+	mutex_unlock(&lo->lo_ctl_mutex);
 
 out_unlocked:
 	return err;
@@ -1585,16 +1584,16 @@
 
 	switch(cmd) {
 	case LOOP_SET_STATUS:
-		mutex_lock(&loop_ctl_mutex);
+		mutex_lock(&lo->lo_ctl_mutex);
 		err = loop_set_status_compat(
 			lo, (const struct compat_loop_info __user *) arg);
-		mutex_unlock(&loop_ctl_mutex);
+		mutex_unlock(&lo->lo_ctl_mutex);
 		break;
 	case LOOP_GET_STATUS:
-		mutex_lock(&loop_ctl_mutex);
+		mutex_lock(&lo->lo_ctl_mutex);
 		err = loop_get_status_compat(
 			lo, (struct compat_loop_info __user *) arg);
-		mutex_unlock(&loop_ctl_mutex);
+		mutex_unlock(&lo->lo_ctl_mutex);
 		break;
 	case LOOP_SET_CAPACITY:
 	case LOOP_CLR_FD:
@@ -1639,7 +1638,7 @@
 	if (atomic_dec_return(&lo->lo_refcnt))
 		return;
 
-	mutex_lock(&loop_ctl_mutex);
+	mutex_lock(&lo->lo_ctl_mutex);
 	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
 		/*
 		 * In autoclear mode, stop the loop thread
@@ -1656,7 +1655,7 @@
 		loop_flush(lo);
 	}
 
-	mutex_unlock(&loop_ctl_mutex);
+	mutex_unlock(&lo->lo_ctl_mutex);
 }
 
 static void lo_release(struct gendisk *disk, fmode_t mode)
@@ -1702,10 +1701,10 @@
 	struct loop_device *lo = ptr;
 	struct loop_func_table *xfer = data;
 
-	mutex_lock(&loop_ctl_mutex);
+	mutex_lock(&lo->lo_ctl_mutex);
 	if (lo->lo_encryption == xfer)
 		loop_release_xfer(lo);
-	mutex_unlock(&loop_ctl_mutex);
+	mutex_unlock(&lo->lo_ctl_mutex);
 	return 0;
 }
 
@@ -1872,6 +1871,7 @@
 	if (!part_shift)
 		disk->flags |= GENHD_FL_NO_PART_SCAN;
 	disk->flags |= GENHD_FL_EXT_DEVT;
+	mutex_init(&lo->lo_ctl_mutex);
 	atomic_set(&lo->lo_refcnt, 0);
 	lo->lo_number		= i;
 	spin_lock_init(&lo->lo_lock);
@@ -1984,19 +1984,19 @@
 		ret = loop_lookup(&lo, parm);
 		if (ret < 0)
 			break;
-		mutex_lock(&loop_ctl_mutex);
+		mutex_lock(&lo->lo_ctl_mutex);
 		if (lo->lo_state != Lo_unbound) {
 			ret = -EBUSY;
-			mutex_unlock(&loop_ctl_mutex);
+			mutex_unlock(&lo->lo_ctl_mutex);
 			break;
 		}
 		if (atomic_read(&lo->lo_refcnt) > 0) {
 			ret = -EBUSY;
-			mutex_unlock(&loop_ctl_mutex);
+			mutex_unlock(&lo->lo_ctl_mutex);
 			break;
 		}
 		lo->lo_disk->private_data = NULL;
-		mutex_unlock(&loop_ctl_mutex);
+		mutex_unlock(&lo->lo_ctl_mutex);
 		idr_remove(&loop_index_idr, lo->lo_number);
 		loop_remove(lo);
 		break;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index a923e74..60f0fd2 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -55,6 +55,7 @@
 
 	spinlock_t		lo_lock;
 	int			lo_state;
+	struct mutex		lo_ctl_mutex;
 	struct kthread_worker	worker;
 	struct task_struct	*worker_task;
 	bool			use_dio;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 10332c2..44ef1d6 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -392,6 +392,8 @@
 	if (err)
 		num_vqs = 1;
 
+	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
 	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
 	if (!vblk->vqs)
 		return -ENOMEM;
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index c4328d9..f838119 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1062,6 +1062,8 @@
 	return 0;
 
 err_read:
+	/* prevent double queue cleanup */
+	ace->gd->queue = NULL;
 	put_disk(ace->gd);
 err_alloc_disk:
 	blk_cleanup_queue(ace->queue);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index ff42808..a46f188 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -265,6 +265,7 @@
 /* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */
 /* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */
 
+#include <linux/atomic.h>
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/major.h>
@@ -3683,9 +3684,9 @@
 
 static void cdrom_sysctl_register(void)
 {
-	static int initialized;
+	static atomic_t initialized = ATOMIC_INIT(0);
 
-	if (initialized == 1)
+	if (!atomic_add_unless(&initialized, 1, 1))
 		return;
 
 	cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
@@ -3696,8 +3697,6 @@
 	cdrom_sysctl_settings.debug = debug;
 	cdrom_sysctl_settings.lock = lockdoor;
 	cdrom_sysctl_settings.check = check_media_type;
-
-	initialized = 1;
 }
 
 static void cdrom_sysctl_unregister(void)
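atomic_add_unless(&initialized, 1, 1) increments only when the counter is not
already 1 and returns non-zero iff it performed the add, so exactly one caller
proceeds past the early return even when several race here. A runnable
userspace model of the same one-shot idiom, using a C11 compare-and-swap:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int initialized;

    /* only the caller that flips 0 -> 1 performs the registration */
    static void register_once(void)
    {
    	int expected = 0;

    	if (!atomic_compare_exchange_strong(&initialized, &expected, 1))
    		return;		/* lost the race: already initialized */
    	puts("registering sysctl table");
    }

    int main(void)
    {
    	register_once();	/* prints once */
    	register_once();	/* no-op */
    	return 0;
    }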
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 87d8c3c..53125ec 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -379,7 +379,7 @@
 
 config R3964
 	tristate "Siemens R3964 line discipline"
-	depends on TTY
+	depends on TTY && BROKEN
 	---help---
 	  This driver allows synchronous communication with devices using the
 	  Siemens R3964 packet protocol. Unless you are dealing with special
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 1db48c6..dea48e1 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -225,9 +225,11 @@
 	int tgid;
 	remote_arg_t *lpra;
 	remote_arg64_t *rpra;
+	remote_arg64_t *lrpra;		/* Local copy of rpra for put_args */
 	int *fds;
 	struct fastrpc_mmap **maps;
 	struct fastrpc_buf *buf;
+	struct fastrpc_buf *lbuf;
 	size_t used;
 	struct fastrpc_file *fl;
 	uint32_t sc;
@@ -621,8 +623,13 @@
 			if (va >= map->va &&
 				va + len <= map->va + map->len &&
 				map->fd == fd) {
-				if (refs)
+				if (refs) {
+					if (map->refs + 1 == INT_MAX) {
+						spin_unlock(&me->hlock);
+						return -ETOOMANYREFS;
+					}
 					map->refs++;
+				}
 				match = map;
 				break;
 			}
@@ -633,8 +640,11 @@
 			if (va >= map->va &&
 				va + len <= map->va + map->len &&
 				map->fd == fd) {
-				if (refs)
+				if (refs) {
+					if (map->refs + 1 == INT_MAX)
+						return -ETOOMANYREFS;
 					map->refs++;
+				}
 				match = map;
 				break;
 			}
@@ -1273,6 +1283,7 @@
 
 	mutex_unlock(&ctx->fl->fl_map_mutex);
 	fastrpc_buf_free(ctx->buf, 1);
+	fastrpc_buf_free(ctx->lbuf, 1);
 	ctx->magic = 0;
 	ctx->ctxid = 0;
 
@@ -1419,7 +1430,7 @@
 static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 {
 	struct fastrpc_apps *me = &gfa;
-	remote_arg64_t *rpra;
+	remote_arg64_t *rpra, *lrpra;
 	remote_arg_t *lpra = ctx->lpra;
 	struct smq_invoke_buf *list;
 	struct smq_phy_page *pages, *ipage;
@@ -1428,7 +1439,7 @@
 	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
 	int handles, bufs = inbufs + outbufs;
 	uintptr_t args;
-	size_t rlen = 0, copylen = 0, metalen = 0;
+	size_t rlen = 0, copylen = 0, metalen = 0, lrpralen = 0;
 	int i, oix;
 	int err = 0;
 	int mflags = 0;
@@ -1486,7 +1497,20 @@
 		metalen = copylen = (size_t)&ipage[0];
 	}
 
-	/* calculate len requreed for copying */
+	/* allocate new local rpra buffer */
+	lrpralen = (size_t)&list[0];
+	if (lrpralen) {
+		err = fastrpc_buf_alloc(ctx->fl, lrpralen, 0, 0, 0, &ctx->lbuf);
+		if (err)
+			goto bail;
+	}
+	if (ctx->lbuf->virt)
+		memset(ctx->lbuf->virt, 0, lrpralen);
+
+	lrpra = ctx->lbuf->virt;
+	ctx->lrpra = lrpra;
+
+	/* calculate len required for copying */
 	for (oix = 0; oix < inbufs + outbufs; ++oix) {
 		int i = ctx->overps[oix]->raix;
 		uintptr_t mstart, mend;
@@ -1537,13 +1561,13 @@
 
 	/* map ion buffers */
 	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
-	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
+	for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
 		struct fastrpc_mmap *map = ctx->maps[i];
 		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
 		size_t len = lpra[i].buf.len;
 
-		rpra[i].buf.pv = 0;
-		rpra[i].buf.len = len;
+		rpra[i].buf.pv = lrpra[i].buf.pv = 0;
+		rpra[i].buf.len = lrpra[i].buf.len = len;
 		if (!len)
 			continue;
 		if (map) {
@@ -1571,7 +1595,7 @@
 			pages[idx].addr = map->phys + offset;
 			pages[idx].size = num << PAGE_SHIFT;
 		}
-		rpra[i].buf.pv = buf;
+		rpra[i].buf.pv = lrpra[i].buf.pv = buf;
 	}
 	PERF_END);
 	for (i = bufs; i < bufs + handles; ++i) {
@@ -1591,7 +1615,7 @@
 	/* copy non ion buffers */
 	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
 	rlen = copylen - metalen;
-	for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
+	for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
 		int i = ctx->overps[oix]->raix;
 		struct fastrpc_mmap *map = ctx->maps[i];
 		size_t mlen;
@@ -1610,7 +1634,8 @@
 		VERIFY(err, rlen >= mlen);
 		if (err)
 			goto bail;
-		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
+		rpra[i].buf.pv = lrpra[i].buf.pv =
+			 (args - ctx->overps[oix]->offset);
 		pages[list[i].pgidx].addr = ctx->buf->phys -
 					    ctx->overps[oix]->offset +
 					    (copylen - rlen);
@@ -1642,7 +1667,8 @@
 		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
 			continue;
 
-		if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
+		if (rpra && lrpra && rpra[i].buf.len &&
+			ctx->overps[oix]->mstart) {
 			if (map && map->handle)
 				msm_ion_do_cache_op(ctx->fl->apps->client,
 					map->handle,
@@ -1656,10 +1682,11 @@
 		}
 	}
 	PERF_END);
-	for (i = bufs; rpra && i < bufs + handles; i++) {
-		rpra[i].dma.fd = ctx->fds[i];
-		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
-		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
+	for (i = bufs; rpra && lrpra && i < bufs + handles; i++) {
+		rpra[i].dma.fd = lrpra[i].dma.fd = ctx->fds[i];
+		rpra[i].dma.len = lrpra[i].dma.len = (uint32_t)lpra[i].buf.len;
+		rpra[i].dma.offset = lrpra[i].dma.offset =
+			 (uint32_t)(uintptr_t)lpra[i].buf.pv;
 	}
 
  bail:
@@ -1677,7 +1704,7 @@
 	uint64_t *fdlist = NULL;
 	uint32_t *crclist = NULL;
 
-	remote_arg64_t *rpra = ctx->rpra;
+	remote_arg64_t *rpra = ctx->lrpra;
 	int i, inbufs, outbufs, handles;
 	int err = 0;
 
@@ -1784,7 +1811,7 @@
 {
 	int i, inbufs, outbufs;
 	uint32_t sc = ctx->sc;
-	remote_arg64_t *rpra = ctx->rpra;
+	remote_arg64_t *rpra = ctx->lrpra;
 
 	inbufs = REMOTE_SCALARS_INBUFS(sc);
 	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
@@ -3533,6 +3560,7 @@
 static int fastrpc_internal_control(struct fastrpc_file *fl,
 					struct fastrpc_ioctl_control *cp)
 {
+	struct fastrpc_apps *me = &gfa;
 	int err = 0;
 	int latency;
 
@@ -3558,7 +3586,8 @@
 			pm_qos_update_request(&fl->pm_qos_req, latency);
 		break;
 	case FASTRPC_CONTROL_SMMU:
-		fl->sharedcb = cp->smmu.sharedcb;
+		if (!me->legacy)
+			fl->sharedcb = cp->smmu.sharedcb;
 		break;
 	case FASTRPC_CONTROL_KALLOC:
 		cp->kalloc.kalloc_support = 1;
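The lrpra changes in adsprpc.c follow one rule: rpra lives in memory the
remote processor can scribble on, so every field written to rpra is mirrored
into the kernel-private lrpra, and put_args()/inv_args() read only the
mirror. A small userspace model of the write-through idea (struct and field
names are illustrative, not the driver's):

    #include <stdio.h>

    struct rarg {
    	unsigned long pv;
    	unsigned long len;
    };

    /* write-through: the shared copy is what the remote side reads, the
     * private copy is the only one the kernel trusts afterwards */
    static void set_arg(struct rarg *shared, struct rarg *priv,
    		    unsigned long pv, unsigned long len)
    {
    	shared->pv = priv->pv = pv;
    	shared->len = priv->len = len;
    }

    int main(void)
    {
    	struct rarg shared = { 0, 0 }, priv = { 0, 0 };

    	set_arg(&shared, &priv, 0x1000, 64);
    	shared.len = 4096;	/* remote side scribbles on shared memory */

    	/* decisions are based on priv, so the scribble has no effect */
    	printf("trusted len %lu (shared now says %lu)\n",
    	       priv.len, shared.len);
    	return 0;
    }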
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index d3ea8b7..57d6544 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1951,7 +1951,7 @@
 	if (!buf)
 		return -EIO;
 
-	if (len <= (sizeof(struct dci_pkt_req_t) +
+	if (len < (sizeof(struct dci_pkt_req_t) +
 		sizeof(struct diag_pkt_header_t)) ||
 		len > DCI_REQ_BUF_SIZE) {
 		pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
@@ -1964,7 +1964,6 @@
 	req_len -= sizeof(struct dci_pkt_req_t);
 	req_buf = temp; /* Start of the Request */
 	header = (struct diag_pkt_header_t *)temp;
-	temp += sizeof(struct diag_pkt_header_t);
 	read_len += sizeof(struct diag_pkt_header_t);
 	if (read_len >= DCI_REQ_BUF_SIZE) {
 		pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 36adaa9..7ef332f 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -656,6 +656,7 @@
 	int usb_connected;
 #endif
 	int pcie_connected;
+	int pcie_switch_pid;
 	struct workqueue_struct *diag_wq;
 	struct work_struct diag_drain_work;
 	struct work_struct update_user_clients;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 47da553..e2382a3 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -460,15 +460,19 @@
 	session_info = diag_md_session_get_pid(pid);
 	if (!session_info) {
 		mutex_unlock(&driver->md_session_lock);
-		if (driver->pcie_transport_def == DIAG_ROUTE_TO_PCIE)
-			params.req_mode = PCIE_MODE;
-		else
-			params.req_mode = USB_MODE;
-		params.mode_param = 0;
-		params.pd_mask = 0;
-		params.peripheral_mask = DIAG_CON_ALL;
 		mutex_lock(&driver->diagchar_mutex);
-		diag_switch_logging(&params);
+		if (driver->pcie_switch_pid == pid) {
+			if (driver->pcie_transport_def ==
+				DIAG_ROUTE_TO_PCIE)
+				params.req_mode = PCIE_MODE;
+			else
+				params.req_mode = USB_MODE;
+			params.mode_param = 0;
+			params.pd_mask = 0;
+			params.peripheral_mask = DIAG_CON_ALL;
+			diag_switch_logging(&params);
+			driver->pcie_switch_pid = 0;
+		}
 		mutex_unlock(&driver->diagchar_mutex);
 		return;
 	}
@@ -1895,6 +1899,17 @@
 	}
 	driver->logging_mode = new_mode;
 	driver->logging_mask = peripheral_mask;
+	if (((curr_mode == DIAG_PCIE_MODE && new_mode == DIAG_USB_MODE) ||
+		(curr_mode == DIAG_USB_MODE && new_mode == DIAG_PCIE_MODE)) &&
+		!driver->pcie_switch_pid) {
+		/*
+		 * Store the pid of the process that switched the
+		 * transport from USB to PCIe (or vice versa), so
+		 * that only that process restores the default
+		 * transport when its logging session is closed.
+		 */
+		driver->pcie_switch_pid = current->tgid;
+	}
 	if (new_mode == DIAG_PCIE_MODE) {
 		driver->transport_set = DIAG_ROUTE_TO_PCIE;
 		diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_pcie_apps,
@@ -4204,6 +4219,7 @@
 	driver->mask_check = 0;
 	driver->in_busy_pktdata = 0;
 	driver->in_busy_dcipktdata = 0;
+	driver->pcie_switch_pid = 0;
 	driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
 	hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
 	hdlc_data.len = 0;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 50272fe..818a8d4 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -376,7 +376,7 @@
 	pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
 	return 1;
 }
-__setup("hpet_mmap", hpet_mmap_enable);
+__setup("hpet_mmap=", hpet_mmap_enable);
 
 static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
 {
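For __setup() parameters, the registered string is matched as a prefix of the
boot argument and the handler receives the remainder, so without the trailing
'=' the handler for hpet_mmap=1 would be handed "=1" and fail to parse the
value. A hedged generic example; demo_feature is hypothetical:

    #include <linux/init.h>
    #include <linux/kernel.h>

    static int demo_feature;

    /* boot argument "demo_feature=1": with the trailing '=' in __setup(),
     * str points at "1"; without it, str would still contain the '=' */
    static int __init demo_feature_setup(char *str)
    {
    	get_option(&str, &demo_feature);
    	return 1;	/* non-zero: the argument was consumed */
    }
    __setup("demo_feature=", demo_feature_setup);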
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 3fa2f8a..1c5c431 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -73,7 +73,7 @@
 
 	if (!vi->busy) {
 		vi->busy = true;
-		init_completion(&vi->have_data);
+		reinit_completion(&vi->have_data);
 		register_buffer(vi, buf, size);
 	}
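reinit_completion() resets only the done counter, whereas calling
init_completion() on every request would also reinitialize the embedded
wait-queue, which is not safe once the completion is live. A sketch of the
idiom in kernel context; the inline complete() stands in for the virtqueue
callback that normally signals the data:

    #include <linux/completion.h>
    #include <linux/init.h>
    #include <linux/module.h>

    static struct completion have_data;

    static void demo_request(void)
    {
    	/* re-arm an already-initialized completion: resets ->done but
    	 * leaves the wait-queue state alone */
    	reinit_completion(&have_data);

    	/* done inline only to keep the sketch self-contained */
    	complete(&have_data);

    	wait_for_completion(&have_data);
    }

    static int __init demo_init(void)
    {
    	init_completion(&have_data);	/* full init: exactly once */
    	demo_request();
    	return 0;
    }
    module_init(demo_init);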
 
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index dac36ef..996b9ae1 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -699,12 +699,16 @@
 			/* End of read */
 			len = ssif_info->multi_len;
 			data = ssif_info->data;
-		} else if (blocknum != ssif_info->multi_pos) {
+		} else if (blocknum + 1 != ssif_info->multi_pos) {
 			/*
 			 * Out of sequence block, just abort.  Block
 			 * numbers start at zero for the second block,
 			 * but multi_pos starts at one, so the +1.
 			 */
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				dev_dbg(&ssif_info->client->dev,
+					"Received message out of sequence, expected %u, got %u\n",
+					ssif_info->multi_pos - 1, blocknum);
 			result = -EIO;
 		} else {
 			ssif_inc_stat(ssif_info, received_message_parts);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a9ce2f7..4b0edcd 100755
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -893,6 +893,7 @@
 	if (crng == &primary_crng && crng_init < 2) {
 		numa_crng_init();
 		crng_init = 2;
+		spin_unlock_irqrestore(&crng->lock, flags);
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: crng init done\n");
@@ -908,8 +909,9 @@
 				  urandom_warning.missed);
 			urandom_warning.missed = 0;
 		}
+	} else {
+		spin_unlock_irqrestore(&crng->lock, flags);
 	}
-	spin_unlock_irqrestore(&crng->lock, flags);
 }
 
 static inline void maybe_reseed_primary_crng(void)
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index fa0f668..d29f784 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -102,19 +102,29 @@
 	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
 	unsigned int expected;
 
-	/* sanity check */
-	if (count < 6)
+	/* A sanity check that the upper layer wants to read at least the
+	 * header, as that is the minimum size of any TPM response.
+	 */
+	if (count < TPM_HEADER_SIZE)
 		return -EIO;
 
+	/* If this bit is set, according to the spec, the TPM is in
+	 * an unrecoverable condition.
+	 */
 	if (ioread32(&priv->cca->sts) & CRB_CTRL_STS_ERROR)
 		return -EIO;
 
-	memcpy_fromio(buf, priv->rsp, 6);
-	expected = be32_to_cpup((__be32 *) &buf[2]);
-	if (expected > count || expected < 6)
+	/* Read the first 8 bytes in order to get the length of the response.
+	 * We read exactly a quad word in order to make sure that the remaining
+	 * reads will be aligned.
+	 */
+	memcpy_fromio(buf, priv->rsp, 8);
+
+	expected = be32_to_cpup((__be32 *)&buf[2]);
+	if (expected > count || expected < TPM_HEADER_SIZE)
 		return -EIO;
 
-	memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
+	memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
 
 	return expected;
 }
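
The crb_recv() rework is the standard recipe for length-prefixed device responses: read a fixed-size header first, pull the declared length out of it, and validate that length against both the minimum header size and the caller's buffer before fetching the rest. A condensed sketch, where HDR_SIZE stands in for TPM_HEADER_SIZE and dev_read() for the memcpy_fromio() calls:

    static ssize_t recv_resp(u8 *buf, size_t count)
    {
            u32 expected;

            if (count < HDR_SIZE)           /* caller can't hold a header */
                    return -EIO;

            dev_read(buf, 8);               /* quad-word-aligned header read */
            expected = get_unaligned_be32(&buf[2]);

            /* Reject lengths the device cannot legally claim. */
            if (expected > count || expected < HDR_SIZE)
                    return -EIO;

            dev_read(&buf[8], expected - 8);
            return expected;
    }
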
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 95ce2e9..cc4e642 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -65,7 +65,15 @@
 	dev_dbg(&chip->dev,
 		"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
 		(int)min_t(size_t, 64, len), buf, len, status);
-	return status;
+
+	if (status < 0)
+		return status;
+
+	/* The upper layer does not support incomplete sends. */
+	if (status != len)
+		return -E2BIG;
+
+	return 0;
 }
 
 static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index 7b222a5..82d615f 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -41,6 +41,43 @@
 	return pdmclk->enabled;
 }
 
+static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
+					  unsigned int reg)
+{
+	const u8 reset_mask = TWL6040_HPLLRST;	/* Same for HPPLL and LPPLL */
+	int ret;
+
+	ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
+	if (ret < 0)
+		return ret;
+
+	ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
+ * Cold Temperature". This appears to affect cold boot and the deeper
+ * idle states. The workaround consists of resetting HPPLL and LPPLL.
+ */
+static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
+{
+	int ret;
+
+	ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
+	if (ret)
+		return ret;
+
+	ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static int twl6040_pdmclk_prepare(struct clk_hw *hw)
 {
 	struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
@@ -48,8 +85,20 @@
 	int ret;
 
 	ret = twl6040_power(pdmclk->twl6040, 1);
-	if (!ret)
-		pdmclk->enabled = 1;
+	if (ret)
+		return ret;
+
+	ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
+	if (ret)
+		goto out_err;
+
+	pdmclk->enabled = 1;
+
+	return 0;
+
+out_err:
+	dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
+	twl6040_power(pdmclk->twl6040, 0);
 
 	return ret;
 }
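
The prepare path now follows the usual acquire/act/unwind ordering: power the chip up, apply the erratum workaround, and on failure undo the power-up before propagating the error. Reduced to its shape, with hypothetical names:

    static int my_clk_prepare(struct my_ctx *c)
    {
            int ret;

            ret = power_on(c);                      /* acquire */
            if (ret)
                    return ret;

            ret = apply_erratum_workaround(c);      /* act */
            if (ret)
                    goto out_err;

            c->enabled = 1;
            return 0;

    out_err:
            power_off(c);                           /* unwind in reverse */
            return ret;
    }
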
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index e8248f9..4dec9e9 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -364,16 +364,16 @@
 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
 	const struct ingenic_cgu_clk_info *clk_info;
-	long rate = *parent_rate;
+	unsigned int div = 1;
 
 	clk_info = &cgu->clock_info[ingenic_clk->idx];
 
 	if (clk_info->type & CGU_CLK_DIV)
-		rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
+		div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
 	else if (clk_info->type & CGU_CLK_FIXDIV)
-		rate /= clk_info->fixdiv.div;
+		div = clk_info->fixdiv.div;
 
-	return rate;
+	return DIV_ROUND_UP(*parent_rate, div);
 }
 
 static int
@@ -393,7 +393,7 @@
 
 	if (clk_info->type & CGU_CLK_DIV) {
 		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
-		rate = parent_rate / div;
+		rate = DIV_ROUND_UP(parent_rate, div);
 
 		if (rate != req_rate)
 			return -EINVAL;
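
Both hunks switch from truncating division to DIV_ROUND_UP() so that round_rate() and set_rate() compute the same rate for a given divider; otherwise the "rate != req_rate" check can reject the very rate that round_rate() just advertised. The difference only shows when the parent rate is not an exact multiple of the divider:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* parent = 48000000, div = 7:
     *   48000000 / 7              == 6857142   (truncated)
     *   DIV_ROUND_UP(48000000, 7) == 6857143   (rounded up)
     */
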
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 09700b2..a22f654 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -78,7 +78,7 @@
  * @reg: offset of the divider control register within the CGU
  * @shift: number of bits to left shift the divide value by (ie. the index of
  *         the lowest bit of the divide value within its control register)
- * @div: number of bits to divide the divider value by (i.e. if the
+ * @div: number to divide the divider value by (i.e. if the
  *	 effective divider value is the value written to the register
  *	 multiplied by some constant)
  * @bits: the size of the divide value in bits
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 6ea5401..7f12812 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -252,9 +252,9 @@
 static SUNXI_CCU_GATE(ahb1_mmc2_clk,	"ahb1-mmc2",	"ahb1",
 		      0x060, BIT(10), 0);
 static SUNXI_CCU_GATE(ahb1_mmc3_clk,	"ahb1-mmc3",	"ahb1",
-		      0x060, BIT(12), 0);
+		      0x060, BIT(11), 0);
 static SUNXI_CCU_GATE(ahb1_nand1_clk,	"ahb1-nand1",	"ahb1",
-		      0x060, BIT(13), 0);
+		      0x060, BIT(12), 0);
 static SUNXI_CCU_GATE(ahb1_nand0_clk,	"ahb1-nand0",	"ahb1",
 		      0x060, BIT(13), 0);
 static SUNXI_CCU_GATE(ahb1_sdram_clk,	"ahb1-sdram",	"ahb1",
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 66d1fc7..1ab36a3 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -638,8 +638,8 @@
 		pll_override_writel(val, params->pmc_divp_reg, pll);
 
 		val = pll_override_readl(params->pmc_divnm_reg, pll);
-		val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) |
-			~(divn_mask(pll) << div_nmp->override_divn_shift);
+		val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) |
+			(divn_mask(pll) << div_nmp->override_divn_shift));
 		val |= (cfg->m << div_nmp->override_divm_shift) |
 			(cfg->n << div_nmp->override_divn_shift);
 		pll_override_writel(val, params->pmc_divnm_reg, pll);
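
The clk-pll fix corrects a De Morgan slip: ~a | ~b equals ~(a & b), which for two disjoint masks is all-ones, so the old expression cleared nothing. Clearing two fields at once requires complementing the union of the masks; the field positions below are illustrative:

    unsigned int val    = 0xffffffffu;
    unsigned int m_mask = 0x1fu;            /* bits 4:0 */
    unsigned int n_mask = 0xffu << 8;       /* bits 15:8 */

    unsigned int wrong = val & (~m_mask | ~n_mask);  /* == 0xffffffff */
    unsigned int right = val & ~(m_mask | n_mask);   /* == 0xffff00e0 */
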
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 7f6fed9..fb0cf8b 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -388,6 +388,13 @@
 	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
 }
 
+static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+{
+	/* Clear the MCT tick interrupt */
+	if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
+		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
+}
+
 static int exynos4_tick_set_next_event(unsigned long cycles,
 				       struct clock_event_device *evt)
 {
@@ -404,6 +411,7 @@
 
 	mevt = container_of(evt, struct mct_clock_event_device, evt);
 	exynos4_mct_tick_stop(mevt);
+	exynos4_mct_tick_clear(mevt);
 	return 0;
 }
 
@@ -420,8 +428,11 @@
 	return 0;
 }
 
-static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
 {
+	struct mct_clock_event_device *mevt = dev_id;
+	struct clock_event_device *evt = &mevt->evt;
+
 	/*
 	 * This is for supporting oneshot mode.
 	 * Mct would generate interrupt periodically
@@ -430,16 +441,6 @@
 	if (!clockevent_state_periodic(&mevt->evt))
 		exynos4_mct_tick_stop(mevt);
 
-	/* Clear the MCT tick interrupt */
-	if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
-		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
-}
-
-static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
-{
-	struct mct_clock_event_device *mevt = dev_id;
-	struct clock_event_device *evt = &mevt->evt;
-
 	exynos4_mct_tick_clear(mevt);
 
 	evt->event_handler(evt);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f690085..4fe9996 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1413,7 +1413,7 @@
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(INTEL_FAM6_SANDYBRIDGE, 		core_params),
 	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT,	silvermont_params),
 	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
 	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
 	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index ce345bf..a24e9f0 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -192,7 +192,7 @@
 	return ret;
 }
 
-static void __init pxa_cpufreq_init_voltages(void)
+static void pxa_cpufreq_init_voltages(void)
 {
 	vcc_core = regulator_get(NULL, "vcc_core");
 	if (IS_ERR(vcc_core)) {
@@ -208,7 +208,7 @@
 	return 0;
 }
 
-static void __init pxa_cpufreq_init_voltages(void) { }
+static void pxa_cpufreq_init_voltages(void) { }
 #endif
 
 static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 4353025..4bb154f 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -134,6 +134,8 @@
 
 	platform_set_drvdata(pdev, priv);
 
+	of_node_put(np);
+
 	return 0;
 
 out_switch_to_pllx:
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 65bb6fd..72bc6e6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -18,6 +18,7 @@
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/math64.h>
 #include <linux/module.h>
 
@@ -130,10 +131,6 @@
 	int		interval_ptr;
 };
 
-
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-
 static inline int get_loadavg(unsigned long load)
 {
 	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
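
The hunk drops the governor's private copies of LOAD_INT()/LOAD_FRAC() in favour of the definitions now shared through <linux/sched/loadavg.h>. The macros decode the kernel's fixed-point load average, which carries FSHIFT fractional bits:

    #define FSHIFT   11
    #define FIXED_1  (1 << FSHIFT)
    #define LOAD_INT(x)  ((x) >> FSHIFT)
    #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

    /* A load of 3.50 is stored as 3.5 * 2048 == 7168:
     *   LOAD_INT(7168)  == 3
     *   LOAD_FRAC(7168) == 50       -> rendered as "3.50"
     */
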
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 4afca39..e3b8beb 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -138,7 +138,8 @@
 	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
 	ctx->hash_final = 0;
 
-	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
+				 SA_SAVE_IV : SA_NOT_SAVE_IV),
 				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
 				 SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
 				 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index c7524bb..7d066fa 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -646,6 +646,15 @@
 		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
 				    dst->offset, dst->length, DMA_FROM_DEVICE);
 	}
+
+	if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
+		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+
+		crypto4xx_memcpy_from_le32((u32 *)req->iv,
+			pd_uinfo->sr_va->save_iv,
+			crypto_skcipher_ivsize(skcipher));
+	}
+
 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
 	if (ablk_req->base.complete != NULL)
 		ablk_req->base.complete(&ablk_req->base, 0);
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 677ca17..368c559 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -80,8 +80,10 @@
 
 	/* Find the TRNG device node and map it */
 	trng = of_find_matching_node(NULL, ppc4xx_trng_match);
-	if (!trng || !of_device_is_available(trng))
+	if (!trng || !of_device_is_available(trng)) {
+		of_node_put(trng);
 		return;
+	}
 
 	dev->trng_base = of_iomap(trng, 0);
 	of_node_put(trng);
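
of_find_matching_node() returns the node with its refcount raised, so the early "not available" exit must drop that reference just like the success path does; of_node_put(NULL) is a safe no-op, which keeps the bail-out branch simple. The general pattern:

    struct device_node *np;

    np = of_find_matching_node(NULL, match_table);  /* takes a reference */
    if (!np || !of_device_is_available(np)) {
            of_node_put(np);        /* of_node_put(NULL) is a no-op */
            return;
    }

    base = of_iomap(np, 0);
    of_node_put(np);                /* balance the reference when done */
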
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 0d743c6..88caca3 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2131,6 +2131,7 @@
 	if (unlikely(req->src != req->dst)) {
 		if (!edesc->dst_nents) {
 			dst_dma = sg_dma_address(req->dst);
+			out_options = 0;
 		} else {
 			dst_dma = edesc->sec4_sg_dma +
 				  sec4_sg_index *
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 0b4a293..d9281a2 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1815,7 +1815,7 @@
 	stvx_u		$out1,$x10,$out
 	stvx_u		$out2,$x20,$out
 	addi		$out,$out,0x30
-	b		Lcbc_dec8x_done
+	b		Lctr32_enc8x_done
 
 .align	5
 Lctr32_enc8x_two:
@@ -1827,7 +1827,7 @@
 	stvx_u		$out0,$x00,$out
 	stvx_u		$out1,$x10,$out
 	addi		$out,$out,0x20
-	b		Lcbc_dec8x_done
+	b		Lctr32_enc8x_done
 
 .align	5
 Lctr32_enc8x_one:
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 1cfa1d9..f8786f6 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -290,7 +290,7 @@
 	struct scatterlist *sg = d->sg;
 	unsigned long now;
 
-	now = min(d->len, sg_dma_len(sg));
+	now = min_t(size_t, d->len, sg_dma_len(sg));
 	if (d->len != IMX_DMA_LENGTH_LOOP)
 		d->len -= now;
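
min() in the kernel rejects mixed-type comparisons at build time; here d->len is a size_t (and may hold a large sentinel) while sg_dma_len() returns an unsigned int, so min_t() is used to name the comparison type explicitly:

    size_t       len    = SIZE_MAX;         /* e.g. a "loop forever" sentinel */
    unsigned int sg_len = 4096;

    /* min(len, sg_len) would not compile in the kernel (type mismatch);
     * min_t() casts both operands to the named type first:
     */
    size_t now = min_t(size_t, len, sg_len);        /* == 4096 */
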
 
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e244e10..5444f39 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -131,24 +131,25 @@
 		desc = &mdesc->desc;
 		last_cookie = desc->cookie;
 
+		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+
 		spin_lock_irqsave(&mchan->lock, irqflags);
+		if (llstat == DMA_COMPLETE) {
+			mchan->last_success = last_cookie;
+			result.result = DMA_TRANS_NOERROR;
+		} else {
+			result.result = DMA_TRANS_ABORTED;
+		}
+
 		dma_cookie_complete(desc);
 		spin_unlock_irqrestore(&mchan->lock, irqflags);
 
-		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
 		dmaengine_desc_get_callback(desc, &cb);
 
 		dma_run_dependencies(desc);
 
 		spin_lock_irqsave(&mchan->lock, irqflags);
 		list_move(&mdesc->node, &mchan->free);
-
-		if (llstat == DMA_COMPLETE) {
-			mchan->last_success = last_cookie;
-			result.result = DMA_TRANS_NOERROR;
-		} else
-			result.result = DMA_TRANS_ABORTED;
-
 		spin_unlock_irqrestore(&mchan->lock, irqflags);
 
 		dmaengine_desc_callback_invoke(&cb, &result);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index d032032..f37a6ef 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1311,6 +1311,7 @@
 	enum dma_status status;
 	unsigned long flags;
 	unsigned int residue;
+	bool cyclic;
 
 	status = dma_cookie_status(chan, cookie, txstate);
 	if (status == DMA_COMPLETE || !txstate)
@@ -1318,10 +1319,11 @@
 
 	spin_lock_irqsave(&rchan->lock, flags);
 	residue = rcar_dmac_chan_get_residue(rchan, cookie);
+	cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
 	spin_unlock_irqrestore(&rchan->lock, flags);
 
 	/* if there's no residue, the cookie is complete */
-	if (!residue)
+	if (!residue && !cyclic)
 		return DMA_COMPLETE;
 
 	dma_set_residue(txstate, residue);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 3722b9d..22f7f0c 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -635,7 +635,10 @@
 
 	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
 	dma_desc = sgreq->dma_desc;
-	dma_desc->bytes_transferred += sgreq->req_len;
+	/* If we DMA for long enough, the transfer count will wrap. */
+	dma_desc->bytes_transferred =
+		(dma_desc->bytes_transferred + sgreq->req_len) %
+		dma_desc->bytes_requested;
 
 	/* Callback needs to be called */
 	if (!dma_desc->cb_count)
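
For cyclic DMA the per-segment byte count is accumulated indefinitely, so the running total eventually exceeds the requested length and any residue derived from it becomes meaningless; wrapping the sum at bytes_requested keeps it a position within the ring. With plain numbers:

    unsigned int requested   = 65536;   /* ring size in bytes */
    unsigned int transferred = 61440;   /* running counter */
    unsigned int seg         = 8192;    /* completed this interrupt */

    /* Unbounded: 61440 + 8192 == 69632, past the end of the ring.
     * Wrapped:   (61440 + 8192) % 65536 == 4096, a valid position.
     */
    transferred = (transferred + seg) % requested;
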
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 236004b..9faa09e 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -93,7 +93,7 @@
 
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
 			continue;
-		if (md->virt_addr == 0) {
+		if (md->virt_addr == 0 && md->phys_addr != 0) {
 			/* no virtual mapping has been installed by the stub */
 			break;
 		}
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 8ff7b0d..3b68c03 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -132,8 +132,10 @@
 	if (err < 0)
 		goto out;
 
-	if (err & BIT(pos))
-		err = -EACCES;
+	if (value & BIT(pos)) {
+		err = -EPERM;
+		goto out;
+	}
 
 	err = 0;
 
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 6f9c9ac..75f30a0 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -837,14 +837,16 @@
 	if (trigger)
 		omap_set_gpio_triggering(bank, offset, trigger);
 
-	/* For level-triggered GPIOs, the clearing must be done after
-	 * the HW source is cleared, thus after the handler has run */
-	if (bank->level_mask & BIT(offset)) {
-		omap_set_gpio_irqenable(bank, offset, 0);
-		omap_clear_gpio_irqstatus(bank, offset);
-	}
-
 	omap_set_gpio_irqenable(bank, offset, 1);
+
+	/*
+	 * For level-triggered GPIOs, clearing must be done after the source
+	 * is cleared, thus after the handler has run. OMAP4 needs this done
+	 * after enabling the interrupt to clear the wakeup status.
+	 */
+	if (bank->level_mask & BIT(offset))
+		omap_clear_gpio_irqstatus(bank, offset);
+
 	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 7a63058..32d22bd 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -774,6 +774,9 @@
 	struct pxa_gpio_bank *c;
 	int gpio;
 
+	if (!pchip)
+		return 0;
+
 	for_each_gpio_bank(gpio, c, pchip) {
 		c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
 		c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
@@ -792,6 +795,9 @@
 	struct pxa_gpio_bank *c;
 	int gpio;
 
+	if (!pchip)
+		return;
+
 	for_each_gpio_bank(gpio, c, pchip) {
 		/* restore level with set/clear */
 		writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index aac8432..b863386 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -531,7 +531,13 @@
 
 	of_node_get(chip->of_node);
 
-	return of_gpiochip_scan_gpios(chip);
+	status = of_gpiochip_scan_gpios(chip);
+	if (status) {
+		of_node_put(chip->of_node);
+		gpiochip_remove_pin_ranges(chip);
+	}
+
+	return status;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index b59441d..4a95974 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3069,6 +3069,7 @@
 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
+		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
 	}
 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6509031..26c4bef 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1600,7 +1600,8 @@
 	if (vma->vm_file != filp)
 		return false;
 
-	return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+	return vma->vm_start == addr &&
+	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
 }
 
 /**
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 863d030..e7a6651 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1473,7 +1473,6 @@
 	if (IS_ERR(regmap))
 		ret = PTR_ERR(regmap);
 	if (ret) {
-		ret = PTR_ERR(regmap);
 		dev_err(dev,
 			"Failed to get system configuration registers: %d\n",
 			ret);
@@ -1529,6 +1528,7 @@
 	of_node_put(remote);
 
 	hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+	of_node_put(i2c_np);
 	if (!hdmi->ddc_adpt) {
 		dev_err(dev, "Failed to get ddc i2c adapter by node\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 68b0171..9c99577 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1036,6 +1036,9 @@
 			"\tClock master = %s\n",
 			display->ctrl[display->clk_master_idx].ctrl->name);
 
+	if (len > user_len)
+		len = user_len;
+
 	if (copy_to_user(user_buf, buf, len)) {
 		kfree(buf);
 		return -EFAULT;
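
The debugfs read handler now clamps the formatted length to the size the caller asked for before copy_to_user(); copying the full formatted output could overrun the userspace buffer. The essential shape of such a read handler, with hypothetical names:

    static ssize_t my_debugfs_read(char __user *user_buf, size_t user_len,
                                   const char *buf, size_t len)
    {
            if (len > user_len)     /* never exceed the caller's buffer */
                    len = user_len;

            if (copy_to_user(user_buf, buf, len))
                    return -EFAULT;

            return len;
    }
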
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 2c7f758..f8e3f23 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -757,6 +757,25 @@
 
 	return true;
 }
+static int _sde_crtc_get_ctlstart_timeout(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	int rc = 0;
+
+	if (!crtc || !crtc->dev)
+		return 0;
+
+	list_for_each_entry(encoder,
+			&crtc->dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_CMD)
+			rc += sde_encoder_get_ctlstart_timeout_state(encoder);
+	}
+
+	return rc;
+}
 
 static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer,
 	struct sde_plane_state *pstate, struct sde_format *format)
@@ -3184,7 +3203,13 @@
 	if (unlikely(!sde_crtc->num_mixers))
 		return;
 
-	_sde_crtc_blend_setup(crtc, old_state, true);
+	if (_sde_crtc_get_ctlstart_timeout(crtc)) {
+		_sde_crtc_blend_setup(crtc, old_state, false);
+		SDE_ERROR("border fill only commit after ctlstart timeout\n");
+	} else {
+		_sde_crtc_blend_setup(crtc, old_state, true);
+	}
+
 	_sde_crtc_dest_scaler_setup(crtc);
 
 	/* cancel the idle notify delayed work */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 34777b30..0237b99 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -3229,6 +3229,24 @@
 	return 0;
 }
 
+int sde_encoder_get_ctlstart_timeout_state(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i, count = 0;
+
+	if (!drm_enc)
+		return 0;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		count += atomic_read(&sde_enc->phys_encs[i]->ctlstart_timeout);
+		atomic_set(&sde_enc->phys_encs[i]->ctlstart_timeout, 0);
+	}
+
+	return count;
+}
+
 /**
  * _sde_encoder_trigger_flush - trigger flush for a physical encoder
  * drm_enc: Pointer to drm encoder structure
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index a4cf467..62b0c9b 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -266,4 +266,11 @@
  */
 void sde_encoder_control_idle_pc(struct drm_encoder *enc, bool enable);
 
+/**
+ * sde_encoder_get_ctlstart_timeout_state - checks whether a ctl start timeout occurred
+ * @drm_enc:    Pointer to drm encoder structure
+ * @Return:     non-zero value if a ctl start timeout occurred
+ */
+int sde_encoder_get_ctlstart_timeout_state(struct drm_encoder *drm_enc);
+
 #endif /* __SDE_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 163a36d..ab16156 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -267,6 +267,7 @@
  * @pending_retire_fence_cnt:   Atomic counter tracking the pending retire
  *                              fences that have to be signalled.
  * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
+ * @ctlstart_timeout:		Indicates if ctl start timeout occurred
  * @irq:			IRQ tracking structures
 * @cont_splash_single_flush:	Variable to check if single flush is enabled.
 * @cont_splash_settings:	Variable to store continuous splash settings.
@@ -301,6 +302,7 @@
 	atomic_t pending_kickoff_cnt;
 	atomic_t pending_retire_fence_cnt;
 	wait_queue_head_t pending_kickoff_wq;
+	atomic_t ctlstart_timeout;
 	struct sde_encoder_irq irq[INTR_IDX_MAX];
 	u32 cont_splash_single_flush;
 	bool cont_splash_settings;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 4409408..d6e8fd3 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -193,6 +193,7 @@
 				phys_enc,
 				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
 		atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+		atomic_set(&phys_enc->ctlstart_timeout, 0);
 	}
 
 	/* notify all synchronous clients first, then asynchronous clients */
@@ -292,6 +293,7 @@
 
 	ctl = phys_enc->hw_ctl;
 	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+	atomic_set(&phys_enc->ctlstart_timeout, 0);
 
 	time_diff_us = ktime_us_delta(ktime_get(), cmd_enc->rd_ptr_timestamp);
 
@@ -1054,6 +1056,7 @@
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
+	atomic_set(&phys_enc->ctlstart_timeout, 0);
 	SDE_DEBUG_CMDENC(cmd_enc, "pp %d state %d\n",
 			phys_enc->hw_pp->idx - PINGPONG_0,
 			phys_enc->enable_state);
@@ -1176,6 +1179,9 @@
 					"ctl start interrupt wait failed\n");
 		else
 			ret = 0;
+
+		if (sde_encoder_phys_cmd_is_master(phys_enc))
+			atomic_inc_return(&phys_enc->ctlstart_timeout);
 	}
 
 	return ret;
@@ -1476,6 +1482,7 @@
 	atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
 	atomic_set(&cmd_enc->pending_rd_ptr_cnt, 0);
 	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+	atomic_set(&phys_enc->ctlstart_timeout, 0);
 	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
 	init_waitqueue_head(&cmd_enc->pending_vblank_wq);
 	atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 434d1e2..cd37d00e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -750,7 +750,9 @@
 		/* Disable the crtc to ensure a full modeset is
 		 * performed whenever it's turned on again. */
 		if (crtc)
-			drm_crtc_force_disable(crtc);
+			drm_crtc_helper_set_mode(crtc, &crtc->mode,
+						 crtc->x, crtc->y,
+						 crtc->primary->fb);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index d960d39..f09388e 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1299,6 +1299,7 @@
 			return -EINVAL;
 		}
 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+		break;
 	case CB_TARGET_MASK:
 		track->cb_target_mask = radeon_get_ib_value(p, idx);
 		track->cb_dirty = true;
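
The evergreen_cs fix adds a missing break: without it, the preceding case fell through into the CB_TARGET_MASK handling and clobbered the tracker state with the wrong register's value. The bug class, distilled with hypothetical names:

    switch (reg) {
    case REG_A:
            handle_a();
            break;          /* the added statement; without it control
                             * falls through and handle_b() also runs
                             */
    case REG_B:
            handle_b();
            break;
    }
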
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 97828fa..d58991b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -137,6 +137,8 @@
 		ret = -ENOMEM;
 		goto free_drm;
 	}
+
+	dev_set_drvdata(dev, drm);
 	drm->dev_private = drv;
 
 	drm_vblank_init(drm, 1);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index c7e6c98..51d34e7 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -846,7 +846,7 @@
 vc4_crtc_reset(struct drm_crtc *crtc)
 {
 	if (crtc->state)
-		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+		vc4_crtc_destroy_state(crtc, crtc->state);
 
 	crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
 	if (crtc->state)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index aec6e9e..55884cb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -531,11 +531,9 @@
 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
 	};
-	struct drm_display_mode *old_mode;
 	struct drm_display_mode *mode;
 	int ret;
 
-	old_mode = par->set_mode;
 	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
 	if (!mode) {
 		DRM_ERROR("Could not create new fb mode.\n");
@@ -546,11 +544,7 @@
 	mode->vdisplay = var->yres;
 	vmw_guess_mode_timing(mode);
 
-	if (old_mode && drm_mode_equal(old_mode, mode)) {
-		drm_mode_destroy(vmw_priv->dev, mode);
-		mode = old_mode;
-		old_mode = NULL;
-	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
+	if (!vmw_kms_validate_mode_vram(vmw_priv,
 					mode->hdisplay *
 					DIV_ROUND_UP(var->bits_per_pixel, 8),
 					mode->vdisplay)) {
@@ -613,8 +607,8 @@
 	schedule_delayed_work(&par->local_work, 0);
 
 out_unlock:
-	if (old_mode)
-		drm_mode_destroy(vmw_priv->dev, old_mode);
+	if (par->set_mode)
+		drm_mode_destroy(vmw_priv->dev, par->set_mode);
 	par->set_mode = mode;
 
 	drm_modeset_unlock_all(vmw_priv->dev);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 99c813a..57d22bc 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -884,8 +884,8 @@
 	.cpmem_ofs = 0x1f000000,
 	.srm_ofs = 0x1f040000,
 	.tpm_ofs = 0x1f060000,
-	.csi0_ofs = 0x1f030000,
-	.csi1_ofs = 0x1f038000,
+	.csi0_ofs = 0x1e030000,
+	.csi1_ofs = 0x1e038000,
 	.ic_ofs = 0x1e020000,
 	.disp0_ofs = 0x1e040000,
 	.disp1_ofs = 0x1e048000,
@@ -900,8 +900,8 @@
 	.cpmem_ofs = 0x07000000,
 	.srm_ofs = 0x07040000,
 	.tpm_ofs = 0x07060000,
-	.csi0_ofs = 0x07030000,
-	.csi1_ofs = 0x07038000,
+	.csi0_ofs = 0x06030000,
+	.csi1_ofs = 0x06038000,
 	.ic_ofs = 0x06020000,
 	.disp0_ofs = 0x06040000,
 	.disp1_ofs = 0x06048000,
diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
index 98686ed..33de3a1 100644
--- a/drivers/gpu/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -195,7 +195,8 @@
 		ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
 				DP_COM_CONF_CSC_DEF_BOTH);
 	} else {
-		if (flow->foreground.in_cs == flow->out_cs)
+		if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
+		    flow->foreground.in_cs == flow->out_cs)
 			/*
 			 * foreground identical to output, apply color
 			 * conversion on background
@@ -261,6 +262,8 @@
 	struct ipu_dp_priv *priv = flow->priv;
 	u32 reg, csc;
 
+	dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
+
 	if (!dp->foreground)
 		return;
 
@@ -268,8 +271,9 @@
 
 	reg = readl(flow->base + DP_COM_CONF);
 	csc = reg & DP_COM_CONF_CSC_DEF_MASK;
-	if (csc == DP_COM_CONF_CSC_DEF_FG)
-		reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+	reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+	if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
+		reg |= DP_COM_CONF_CSC_DEF_BG;
 
 	reg &= ~DP_COM_CONF_FG_EN;
 	writel(reg, flow->base + DP_COM_CONF);
@@ -350,6 +354,8 @@
 	mutex_init(&priv->mutex);
 
 	for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+		priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+		priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
 		priv->flow[i].foreground.foreground = true;
 		priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
 		priv->flow[i].priv = priv;
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index d7179dd..3cafa1d 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1058,10 +1058,15 @@
 	seq_printf(f, "\n\n");
 
 	/* dump parsed data and input mappings */
+	if (down_interruptible(&hdev->driver_input_lock))
+		return 0;
+
 	hid_dump_device(hdev, f);
 	seq_printf(f, "\n");
 	hid_dump_input_mapping(hdev, f);
 
+	up(&hdev->driver_input_lock);
+
 	return 0;
 }
 
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fc7ada2..9f7b1cf 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -607,6 +607,14 @@
 			break;
 		}
 
+		if ((usage->hid & 0xf0) == 0xb0) {	/* SC - Display */
+			switch (usage->hid & 0xf) {
+			case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
+			default: goto ignore;
+			}
+			break;
+		}
+
 		/*
 		 * Some lazy vendors declare 255 usages for System Control,
 		 * leading to the creation of ABS_X|Y axis and too many others.
@@ -802,6 +810,10 @@
 		case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX);		break;
 		case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO);		break;
 
+		case 0x079: map_key_clear(KEY_KBDILLUMUP);	break;
+		case 0x07a: map_key_clear(KEY_KBDILLUMDOWN);	break;
+		case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE);	break;
+
 		case 0x082: map_key_clear(KEY_VIDEO_NEXT);	break;
 		case 0x083: map_key_clear(KEY_LAST);		break;
 		case 0x084: map_key_clear(KEY_ENTER);		break;
@@ -932,6 +944,8 @@
 		case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT);	break;
 		case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL);	break;
 
+		case 0x29f: map_key_clear(KEY_SCALE);		break;
+
 		default: map_key_clear(KEY_UNKNOWN);
 		}
 		break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 2e2515a..3198faf 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1282,6 +1282,13 @@
 		kfree(data);
 		return -ENOMEM;
 	}
+	data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+	if (!data->wq) {
+		kfree(data->effect_ids);
+		kfree(data);
+		return -ENOMEM;
+	}
+
 	data->hidpp = hidpp;
 	data->feature_index = feature_index;
 	data->version = version;
@@ -1326,7 +1333,6 @@
 	/* ignore boost value at response.fap.params[2] */
 
 	/* init the hardware command queue */
-	data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
 	atomic_set(&data->workqueue_size, 0);
 
 	/* initialize with zero autocenter to get wheel in usable state */
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
index 832d8f9..099e1ce 100644
--- a/drivers/hid/i2c-hid/Makefile
+++ b/drivers/hid/i2c-hid/Makefile
@@ -3,3 +3,6 @@
 #
 
 obj-$(CONFIG_I2C_HID)				+= i2c-hid.o
+
+i2c-hid-objs					=  i2c-hid-core.o
+i2c-hid-$(CONFIG_DMI)				+= i2c-hid-dmi-quirks.o
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid-core.c
similarity index 96%
rename from drivers/hid/i2c-hid/i2c-hid.c
rename to drivers/hid/i2c-hid/i2c-hid-core.c
index ce2b800..850527d 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -42,6 +42,7 @@
 #include <linux/i2c/i2c-hid.h>
 
 #include "../hid-ids.h"
+#include "i2c-hid.h"
 
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV	BIT(0)
@@ -724,6 +725,7 @@
 	char *rdesc;
 	int ret;
 	int tries = 3;
+	char *use_override;
 
 	i2c_hid_dbg(ihid, "entering %s\n", __func__);
 
@@ -742,26 +744,37 @@
 	if (ret)
 		return ret;
 
-	rdesc = kzalloc(rsize, GFP_KERNEL);
+	use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
+								&rsize);
 
-	if (!rdesc) {
-		dbg_hid("couldn't allocate rdesc memory\n");
-		return -ENOMEM;
-	}
+	if (use_override) {
+		rdesc = use_override;
+		i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
+	} else {
+		rdesc = kzalloc(rsize, GFP_KERNEL);
 
-	i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+		if (!rdesc) {
+			dbg_hid("couldn't allocate rdesc memory\n");
+			return -ENOMEM;
+		}
 
-	ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);
-	if (ret) {
-		hid_err(hid, "reading report descriptor failed\n");
-		kfree(rdesc);
-		return -EIO;
+		i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+
+		ret = i2c_hid_command(client, &hid_report_descr_cmd,
+				      rdesc, rsize);
+		if (ret) {
+			hid_err(hid, "reading report descriptor failed\n");
+			kfree(rdesc);
+			return -EIO;
+		}
 	}
 
 	i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
 
 	ret = hid_parse_report(hid, rdesc, rsize);
-	kfree(rdesc);
+	if (!use_override)
+		kfree(rdesc);
+
 	if (ret) {
 		dbg_hid("parsing report descriptor failed\n");
 		return ret;
@@ -899,12 +912,19 @@
 	int ret;
 
 	/* i2c hid fetch using a fixed descriptor size (30 bytes) */
-	i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
-	ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer,
-				sizeof(struct i2c_hid_desc));
-	if (ret) {
-		dev_err(&client->dev, "hid_descr_cmd failed\n");
-		return -ENODEV;
+	if (i2c_hid_get_dmi_i2c_hid_desc_override(client->name)) {
+		i2c_hid_dbg(ihid, "Using a HID descriptor override\n");
+		ihid->hdesc =
+			*i2c_hid_get_dmi_i2c_hid_desc_override(client->name);
+	} else {
+		i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
+		ret = i2c_hid_command(client, &hid_descr_cmd,
+				      ihid->hdesc_buffer,
+				      sizeof(struct i2c_hid_desc));
+		if (ret) {
+			dev_err(&client->dev, "hid_descr_cmd failed\n");
+			return -ENODEV;
+		}
 	}
 
 	/* Validate the length of the HID descriptor, the first 4 bytes:
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
new file mode 100644
index 0000000..cac262a
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Quirks for I2C-HID devices that do not supply proper descriptors
+ *
+ * Copyright (c) 2018 Julian Sax <jsbc@gmx.de>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+
+#include "i2c-hid.h"
+
+
+struct i2c_hid_desc_override {
+	union {
+		struct i2c_hid_desc *i2c_hid_desc;
+		uint8_t             *i2c_hid_desc_buffer;
+	};
+	uint8_t              *hid_report_desc;
+	unsigned int          hid_report_desc_size;
+	uint8_t              *i2c_name;
+};
+
+
+/*
+ * descriptors for the SIPODEV SP1064 touchpad
+ *
+ * This device does not supply any descriptors. On Windows, a filter
+ * driver operates between the i2c-hid layer and the device and injects
+ * these descriptors when the device is prompted. The descriptors were
+ * extracted by listening to the i2c-hid traffic that occurs between the
+ * Windows filter driver and the Windows i2c-hid driver.
+ */
+
+static const struct i2c_hid_desc_override sipodev_desc = {
+	.i2c_hid_desc_buffer = (uint8_t [])
+	{0x1e, 0x00,                  /* Length of descriptor                 */
+	 0x00, 0x01,                  /* Version of descriptor                */
+	 0xdb, 0x01,                  /* Length of report descriptor          */
+	 0x21, 0x00,                  /* Location of report descriptor        */
+	 0x24, 0x00,                  /* Location of input report             */
+	 0x1b, 0x00,                  /* Max input report length              */
+	 0x25, 0x00,                  /* Location of output report            */
+	 0x11, 0x00,                  /* Max output report length             */
+	 0x22, 0x00,                  /* Location of command register         */
+	 0x23, 0x00,                  /* Location of data register            */
+	 0x11, 0x09,                  /* Vendor ID                            */
+	 0x88, 0x52,                  /* Product ID                           */
+	 0x06, 0x00,                  /* Version ID                           */
+	 0x00, 0x00, 0x00, 0x00       /* Reserved                             */
+	},
+
+	.hid_report_desc = (uint8_t [])
+	{0x05, 0x01,                  /* Usage Page (Desktop),                */
+	 0x09, 0x02,                  /* Usage (Mouse),                       */
+	 0xA1, 0x01,                  /* Collection (Application),            */
+	 0x85, 0x01,                  /*     Report ID (1),                   */
+	 0x09, 0x01,                  /*     Usage (Pointer),                 */
+	 0xA1, 0x00,                  /*     Collection (Physical),           */
+	 0x05, 0x09,                  /*         Usage Page (Button),         */
+	 0x19, 0x01,                  /*         Usage Minimum (01h),         */
+	 0x29, 0x02,                  /*         Usage Maximum (02h),         */
+	 0x25, 0x01,                  /*         Logical Maximum (1),         */
+	 0x75, 0x01,                  /*         Report Size (1),             */
+	 0x95, 0x02,                  /*         Report Count (2),            */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x95, 0x06,                  /*         Report Count (6),            */
+	 0x81, 0x01,                  /*         Input (Constant),            */
+	 0x05, 0x01,                  /*         Usage Page (Desktop),        */
+	 0x09, 0x30,                  /*         Usage (X),                   */
+	 0x09, 0x31,                  /*         Usage (Y),                   */
+	 0x15, 0x81,                  /*         Logical Minimum (-127),      */
+	 0x25, 0x7F,                  /*         Logical Maximum (127),       */
+	 0x75, 0x08,                  /*         Report Size (8),             */
+	 0x95, 0x02,                  /*         Report Count (2),            */
+	 0x81, 0x06,                  /*         Input (Variable, Relative),  */
+	 0xC0,                        /*     End Collection,                  */
+	 0xC0,                        /* End Collection,                      */
+	 0x05, 0x0D,                  /* Usage Page (Digitizer),              */
+	 0x09, 0x05,                  /* Usage (Touchpad),                    */
+	 0xA1, 0x01,                  /* Collection (Application),            */
+	 0x85, 0x04,                  /*     Report ID (4),                   */
+	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
+	 0x09, 0x22,                  /*     Usage (Finger),                  */
+	 0xA1, 0x02,                  /*     Collection (Logical),            */
+	 0x15, 0x00,                  /*         Logical Minimum (0),         */
+	 0x25, 0x01,                  /*         Logical Maximum (1),         */
+	 0x09, 0x47,                  /*         Usage (Touch Valid),         */
+	 0x09, 0x42,                  /*         Usage (Tip Switch),          */
+	 0x95, 0x02,                  /*         Report Count (2),            */
+	 0x75, 0x01,                  /*         Report Size (1),             */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x95, 0x01,                  /*         Report Count (1),            */
+	 0x75, 0x03,                  /*         Report Size (3),             */
+	 0x25, 0x05,                  /*         Logical Maximum (5),         */
+	 0x09, 0x51,                  /*         Usage (Contact Identifier),  */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x75, 0x01,                  /*         Report Size (1),             */
+	 0x95, 0x03,                  /*         Report Count (3),            */
+	 0x81, 0x03,                  /*         Input (Constant, Variable),  */
+	 0x05, 0x01,                  /*         Usage Page (Desktop),        */
+	 0x26, 0x44, 0x0A,            /*         Logical Maximum (2628),      */
+	 0x75, 0x10,                  /*         Report Size (16),            */
+	 0x55, 0x0E,                  /*         Unit Exponent (14),          */
+	 0x65, 0x11,                  /*         Unit (Centimeter),           */
+	 0x09, 0x30,                  /*         Usage (X),                   */
+	 0x46, 0x1A, 0x04,            /*         Physical Maximum (1050),     */
+	 0x95, 0x01,                  /*         Report Count (1),            */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x46, 0xBC, 0x02,            /*         Physical Maximum (700),      */
+	 0x26, 0x34, 0x05,            /*         Logical Maximum (1332),      */
+	 0x09, 0x31,                  /*         Usage (Y),                   */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0xC0,                        /*     End Collection,                  */
+	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
+	 0x09, 0x22,                  /*     Usage (Finger),                  */
+	 0xA1, 0x02,                  /*     Collection (Logical),            */
+	 0x25, 0x01,                  /*         Logical Maximum (1),         */
+	 0x09, 0x47,                  /*         Usage (Touch Valid),         */
+	 0x09, 0x42,                  /*         Usage (Tip Switch),          */
+	 0x95, 0x02,                  /*         Report Count (2),            */
+	 0x75, 0x01,                  /*         Report Size (1),             */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x95, 0x01,                  /*         Report Count (1),            */
+	 0x75, 0x03,                  /*         Report Size (3),             */
+	 0x25, 0x05,                  /*         Logical Maximum (5),         */
+	 0x09, 0x51,                  /*         Usage (Contact Identifier),  */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x75, 0x01,                  /*         Report Size (1),             */
+	 0x95, 0x03,                  /*         Report Count (3),            */
+	 0x81, 0x03,                  /*         Input (Constant, Variable),  */
+	 0x05, 0x01,                  /*         Usage Page (Desktop),        */
+	 0x26, 0x44, 0x0A,            /*         Logical Maximum (2628),      */
+	 0x75, 0x10,                  /*         Report Size (16),            */
+	 0x09, 0x30,                  /*         Usage (X),                   */
+	 0x46, 0x1A, 0x04,            /*         Physical Maximum (1050),     */
+	 0x95, 0x01,                  /*         Report Count (1),            */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x46, 0xBC, 0x02,            /*         Physical Maximum (700),      */
+	 0x26, 0x34, 0x05,            /*         Logical Maximum (1332),      */
+	 0x09, 0x31,                  /*         Usage (Y),                   */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0xC0,                        /*     End Collection,                  */
+	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
+	 0x09, 0x22,                  /*     Usage (Finger),                  */
+	 0xA1, 0x02,                  /*     Collection (Logical),            */
+	 0x25, 0x01,                  /*         Logical Maximum (1),         */
+	 0x09, 0x47,                  /*         Usage (Touch Valid),         */
+	 0x09, 0x42,                  /*         Usage (Tip Switch),          */
+	 0x95, 0x02,                  /*         Report Count (2),            */
+	 0x75, 0x01,                  /*         Report Size (1),             */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x95, 0x01,                  /*         Report Count (1),            */
+	 0x75, 0x03,                  /*         Report Size (3),             */
+	 0x25, 0x05,                  /*         Logical Maximum (5),         */
+	 0x09, 0x51,                  /*         Usage (Contact Identifier),  */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x75, 0x01,                  /*         Report Size (1),             */
+	 0x95, 0x03,                  /*         Report Count (3),            */
+	 0x81, 0x03,                  /*         Input (Constant, Variable),  */
+	 0x05, 0x01,                  /*         Usage Page (Desktop),        */
+	 0x26, 0x44, 0x0A,            /*         Logical Maximum (2628),      */
+	 0x75, 0x10,                  /*         Report Size (16),            */
+	 0x09, 0x30,                  /*         Usage (X),                   */
+	 0x46, 0x1A, 0x04,            /*         Physical Maximum (1050),     */
+	 0x95, 0x01,                  /*         Report Count (1),            */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x46, 0xBC, 0x02,            /*         Physical Maximum (700),      */
+	 0x26, 0x34, 0x05,            /*         Logical Maximum (1332),      */
+	 0x09, 0x31,                  /*         Usage (Y),                   */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0xC0,                        /*     End Collection,                  */
+	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
+	 0x09, 0x22,                  /*     Usage (Finger),                  */
+	 0xA1, 0x02,                  /*     Collection (Logical),            */
+	 0x25, 0x01,                  /*         Logical Maximum (1),         */
+	 0x09, 0x47,                  /*         Usage (Touch Valid),         */
+	 0x09, 0x42,                  /*         Usage (Tip Switch),          */
+	 0x95, 0x02,                  /*         Report Count (2),            */
+	 0x75, 0x01,                  /*         Report Size (1),             */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x95, 0x01,                  /*         Report Count (1),            */
+	 0x75, 0x03,                  /*         Report Size (3),             */
+	 0x25, 0x05,                  /*         Logical Maximum (5),         */
+	 0x09, 0x51,                  /*         Usage (Contact Identifier),  */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x75, 0x01,                  /*         Report Size (1),             */
+	 0x95, 0x03,                  /*         Report Count (3),            */
+	 0x81, 0x03,                  /*         Input (Constant, Variable),  */
+	 0x05, 0x01,                  /*         Usage Page (Desktop),        */
+	 0x26, 0x44, 0x0A,            /*         Logical Maximum (2628),      */
+	 0x75, 0x10,                  /*         Report Size (16),            */
+	 0x09, 0x30,                  /*         Usage (X),                   */
+	 0x46, 0x1A, 0x04,            /*         Physical Maximum (1050),     */
+	 0x95, 0x01,                  /*         Report Count (1),            */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0x46, 0xBC, 0x02,            /*         Physical Maximum (700),      */
+	 0x26, 0x34, 0x05,            /*         Logical Maximum (1332),      */
+	 0x09, 0x31,                  /*         Usage (Y),                   */
+	 0x81, 0x02,                  /*         Input (Variable),            */
+	 0xC0,                        /*     End Collection,                  */
+	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
+	 0x55, 0x0C,                  /*     Unit Exponent (12),              */
+	 0x66, 0x01, 0x10,            /*     Unit (Seconds),                  */
+	 0x47, 0xFF, 0xFF, 0x00, 0x00,/*     Physical Maximum (65535),        */
+	 0x27, 0xFF, 0xFF, 0x00, 0x00,/*     Logical Maximum (65535),         */
+	 0x75, 0x10,                  /*     Report Size (16),                */
+	 0x95, 0x01,                  /*     Report Count (1),                */
+	 0x09, 0x56,                  /*     Usage (Scan Time),               */
+	 0x81, 0x02,                  /*     Input (Variable),                */
+	 0x09, 0x54,                  /*     Usage (Contact Count),           */
+	 0x25, 0x7F,                  /*     Logical Maximum (127),           */
+	 0x75, 0x08,                  /*     Report Size (8),                 */
+	 0x81, 0x02,                  /*     Input (Variable),                */
+	 0x05, 0x09,                  /*     Usage Page (Button),             */
+	 0x09, 0x01,                  /*     Usage (01h),                     */
+	 0x25, 0x01,                  /*     Logical Maximum (1),             */
+	 0x75, 0x01,                  /*     Report Size (1),                 */
+	 0x95, 0x01,                  /*     Report Count (1),                */
+	 0x81, 0x02,                  /*     Input (Variable),                */
+	 0x95, 0x07,                  /*     Report Count (7),                */
+	 0x81, 0x03,                  /*     Input (Constant, Variable),      */
+	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
+	 0x85, 0x02,                  /*     Report ID (2),                   */
+	 0x09, 0x55,                  /*     Usage (Contact Count Maximum),   */
+	 0x09, 0x59,                  /*     Usage (59h),                     */
+	 0x75, 0x04,                  /*     Report Size (4),                 */
+	 0x95, 0x02,                  /*     Report Count (2),                */
+	 0x25, 0x0F,                  /*     Logical Maximum (15),            */
+	 0xB1, 0x02,                  /*     Feature (Variable),              */
+	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
+	 0x85, 0x07,                  /*     Report ID (7),                   */
+	 0x09, 0x60,                  /*     Usage (60h),                     */
+	 0x75, 0x01,                  /*     Report Size (1),                 */
+	 0x95, 0x01,                  /*     Report Count (1),                */
+	 0x25, 0x01,                  /*     Logical Maximum (1),             */
+	 0xB1, 0x02,                  /*     Feature (Variable),              */
+	 0x95, 0x07,                  /*     Report Count (7),                */
+	 0xB1, 0x03,                  /*     Feature (Constant, Variable),    */
+	 0x85, 0x06,                  /*     Report ID (6),                   */
+	 0x06, 0x00, 0xFF,            /*     Usage Page (FF00h),              */
+	 0x09, 0xC5,                  /*     Usage (C5h),                     */
+	 0x26, 0xFF, 0x00,            /*     Logical Maximum (255),           */
+	 0x75, 0x08,                  /*     Report Size (8),                 */
+	 0x96, 0x00, 0x01,            /*     Report Count (256),              */
+	 0xB1, 0x02,                  /*     Feature (Variable),              */
+	 0xC0,                        /* End Collection,                      */
+	 0x06, 0x00, 0xFF,            /* Usage Page (FF00h),                  */
+	 0x09, 0x01,                  /* Usage (01h),                         */
+	 0xA1, 0x01,                  /* Collection (Application),            */
+	 0x85, 0x0D,                  /*     Report ID (13),                  */
+	 0x26, 0xFF, 0x00,            /*     Logical Maximum (255),           */
+	 0x19, 0x01,                  /*     Usage Minimum (01h),             */
+	 0x29, 0x02,                  /*     Usage Maximum (02h),             */
+	 0x75, 0x08,                  /*     Report Size (8),                 */
+	 0x95, 0x02,                  /*     Report Count (2),                */
+	 0xB1, 0x02,                  /*     Feature (Variable),              */
+	 0xC0,                        /* End Collection,                      */
+	 0x05, 0x0D,                  /* Usage Page (Digitizer),              */
+	 0x09, 0x0E,                  /* Usage (Configuration),               */
+	 0xA1, 0x01,                  /* Collection (Application),            */
+	 0x85, 0x03,                  /*     Report ID (3),                   */
+	 0x09, 0x22,                  /*     Usage (Finger),                  */
+	 0xA1, 0x02,                  /*     Collection (Logical),            */
+	 0x09, 0x52,                  /*         Usage (Device Mode),         */
+	 0x25, 0x0A,                  /*         Logical Maximum (10),        */
+	 0x95, 0x01,                  /*         Report Count (1),            */
+	 0xB1, 0x02,                  /*         Feature (Variable),          */
+	 0xC0,                        /*     End Collection,                  */
+	 0x09, 0x22,                  /*     Usage (Finger),                  */
+	 0xA1, 0x00,                  /*     Collection (Physical),           */
+	 0x85, 0x05,                  /*         Report ID (5),               */
+	 0x09, 0x57,                  /*         Usage (57h),                 */
+	 0x09, 0x58,                  /*         Usage (58h),                 */
+	 0x75, 0x01,                  /*         Report Size (1),             */
+	 0x95, 0x02,                  /*         Report Count (2),            */
+	 0x25, 0x01,                  /*         Logical Maximum (1),         */
+	 0xB1, 0x02,                  /*         Feature (Variable),          */
+	 0x95, 0x06,                  /*         Report Count (6),            */
+	 0xB1, 0x03,                  /*         Feature (Constant, Variable),*/
+	 0xC0,                        /*     End Collection,                  */
+	 0xC0                         /* End Collection                       */
+	},
+	.hid_report_desc_size = 475,
+	.i2c_name = "SYNA3602:00"
+};
+
+
+static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+	{
+		.ident = "Teclast F6 Pro",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F6 Pro"),
+		},
+		.driver_data = (void *)&sipodev_desc
+	},
+	{
+		.ident = "Teclast F7",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F7"),
+		},
+		.driver_data = (void *)&sipodev_desc
+	},
+	{
+		.ident = "Trekstor Primebook C13",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
+		},
+		.driver_data = (void *)&sipodev_desc
+	},
+	{
+		.ident = "Trekstor Primebook C11",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
+		},
+		.driver_data = (void *)&sipodev_desc
+	},
+	{
+		.ident = "Direkt-Tek DTLAPY116-2",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY116-2"),
+		},
+		.driver_data = (void *)&sipodev_desc
+	},
+	{
+		.ident = "Mediacom Flexbook Edge 11",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
+		},
+		.driver_data = (void *)&sipodev_desc
+	},
+	{ }	/* Terminate list */
+};
+
+
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{
+	struct i2c_hid_desc_override *override;
+	const struct dmi_system_id *system_id;
+
+	system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
+	if (!system_id)
+		return NULL;
+
+	override = system_id->driver_data;
+	if (strcmp(override->i2c_name, i2c_name))
+		return NULL;
+
+	return override->i2c_hid_desc;
+}
+
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+					       unsigned int *size)
+{
+	struct i2c_hid_desc_override *override;
+	const struct dmi_system_id *system_id;
+
+	system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
+	if (!system_id)
+		return NULL;
+
+	override = system_id->driver_data;
+	if (strcmp(override->i2c_name, i2c_name))
+		return NULL;
+
+	*size = override->hid_report_desc_size;
+	return override->hid_report_desc;
+}
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
new file mode 100644
index 0000000..a8c19ae
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef I2C_HID_H
+#define I2C_HID_H
+
+
+#ifdef CONFIG_DMI
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+					       unsigned int *size);
+#else
+static inline struct i2c_hid_desc
+		   *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{ return NULL; }
+static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+							     unsigned int *size)
+{ return NULL; }
+#endif
+
+#endif
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 0c9ac4d..41d4453 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -92,7 +92,10 @@
 			IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val);
 	} else {
 		pisr_val = ish_reg_read(dev, IPC_REG_PISR_BXT);
-		interrupt_generated = IPC_INT_FROM_ISH_TO_HOST_BXT(pisr_val);
+		interrupt_generated = !!pisr_val;
+		/* only busy-clear bit is RW, others are RO */
+		if (pisr_val)
+			ish_reg_write(dev, IPC_REG_PISR_BXT, pisr_val);
 	}
 
 	return interrupt_generated;
@@ -795,11 +798,11 @@
 {
 	ish_set_host_rdy(dev);
 
+	set_host_ready(dev);
+
 	/* After that we can enable ISH DMA operation and wakeup ISHFW */
 	ish_wakeup(dev);
 
-	set_host_ready(dev);
-
 	/* wait for FW-initiated reset flow */
 	if (!dev->recvd_hw_ready)
 		wait_event_interruptible_timeout(dev->wait_hw_ready,
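The PISR change above leans on the write-one-to-clear register convention:
read the status, then write the same value back, which acks exactly the
bits that fired while read-only bits ignore the write (hence the "only
busy-clear bit is RW" comment). The idiom in isolation, a sketch with an
assumed register name:

	u32 status = readl(base + REG_STATUS);	/* REG_STATUS is assumed */

	if (status)
		writel(status, base + REG_STATUS); /* W1C: RO bits unaffected */
	return !!status;	/* any nonzero status counts as ours */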
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 2565215..0de18c7 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -628,7 +628,8 @@
 	spin_lock_irqsave(&cl->dev->device_list_lock, flags);
 	list_for_each_entry(cl_device, &cl->dev->device_list,
 			device_link) {
-		if (cl_device->fw_client->client_id == cl->fw_client_id) {
+		if (cl_device->fw_client &&
+		    cl_device->fw_client->client_id == cl->fw_client_id) {
 			cl->device = cl_device;
 			rv = 0;
 			break;
@@ -688,6 +689,7 @@
 	spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
 	list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
 				 device_link) {
+		cl_device->fw_client = NULL;
 		if (warm_reset && cl_device->reference_count)
 			continue;
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
old mode 100644
new mode 100755
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 33e0936..98a4cb5 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -599,11 +599,15 @@
 {
 	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
 	int port = othdev->output.port;
+	int master;
 
 	spin_lock(&gth->gth_lock);
 	othdev->output.port = -1;
 	othdev->output.active = false;
 	gth->output[port].output = NULL;
+	for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
+		if (gth->master[master] == port)
+			gth->master[master] = -1;
 	spin_unlock(&gth->gth_lock);
 }
 
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index f91d9fa..aadae9d 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -90,6 +90,7 @@
  * @reg_base:		register window base address
  * @thdev:		intel_th_device pointer
  * @win_list:		list of windows in multiblock mode
+ * @single_sgt:		single mode buffer
  * @nr_pages:		total number of pages allocated for this buffer
  * @single_sz:		amount of data in single mode
  * @single_wrap:	single mode wrap occurred
@@ -110,6 +111,7 @@
 	struct intel_th_device	*thdev;
 
 	struct list_head	win_list;
+	struct sg_table		single_sgt;
 	unsigned long		nr_pages;
 	unsigned long		single_sz;
 	unsigned int		single_wrap : 1;
@@ -623,22 +625,45 @@
  */
 static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
 {
+	unsigned long nr_pages = size >> PAGE_SHIFT;
 	unsigned int order = get_order(size);
 	struct page *page;
+	int ret;
 
 	if (!size)
 		return 0;
 
+	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
+	if (ret)
+		goto err_out;
+
+	ret = -ENOMEM;
 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!page)
-		return -ENOMEM;
+		goto err_free_sgt;
 
 	split_page(page, order);
-	msc->nr_pages = size >> PAGE_SHIFT;
+	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
+
+	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
+			 DMA_FROM_DEVICE);
+	if (ret < 0)
+		goto err_free_pages;
+
+	msc->nr_pages = nr_pages;
 	msc->base = page_address(page);
-	msc->base_addr = page_to_phys(page);
+	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
 
 	return 0;
+
+err_free_pages:
+	__free_pages(page, order);
+
+err_free_sgt:
+	sg_free_table(&msc->single_sgt);
+
+err_out:
+	return ret;
 }
 
 /**
@@ -649,6 +674,10 @@
 {
 	unsigned long off;
 
+	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
+		     1, DMA_FROM_DEVICE);
+	sg_free_table(&msc->single_sgt);
+
 	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
 		struct page *page = virt_to_page(msc->base + off);
 
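The single-mode buffer is now described by a one-entry sg_table and mapped
through the DMA API, so msc->base_addr carries a dma_addr_t that remains
valid behind an IOMMU rather than a raw physical address. The shape of the
pattern, a sketch with 'dev' and 'size' assumed and the error unwinding
elided (the real code above keeps it):

	struct sg_table sgt;
	struct page *page;
	dma_addr_t hw_base;

	sg_alloc_table(&sgt, 1, GFP_KERNEL);
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
	sg_set_buf(sgt.sgl, page_address(page), size);
	if (dma_map_sg(dev, sgt.sgl, 1, DMA_FROM_DEVICE) < 1)
		return -ENOMEM;		   /* dma_map_sg() returns 0 on error */
	hw_base = sg_dma_address(sgt.sgl); /* program this into the device */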
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index f260922..beeb99c 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -227,8 +227,8 @@
 	bitmap_release_region(&master->chan_map[0], output->channel,
 			      ilog2(output->nr_chans));
 
-	output->nr_chans = 0;
 	master->nr_free += output->nr_chans;
+	output->nr_chans = 0;
 }
 
 /*
@@ -253,6 +253,9 @@
 			;
 		if (i == width)
 			return pos;
+
+		/* step over [pos..pos+i) to continue search */
+		pos += i;
 	}
 
 	return -1;
@@ -565,7 +568,7 @@
 {
 	struct stm_device *stm = stmf->stm;
 	struct stp_policy_id *id;
-	int ret = -EINVAL;
+	int ret = -EINVAL, wlimit = 1;
 	u32 size;
 
 	if (stmf->output.nr_chans)
@@ -593,8 +596,10 @@
 	if (id->__reserved_0 || id->__reserved_1)
 		goto err_free;
 
-	if (id->width < 1 ||
-	    id->width > PAGE_SIZE / stm->data->sw_mmiosz)
+	if (stm->data->sw_mmiosz)
+		wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
+
+	if (id->width < 1 || id->width > wlimit)
 		goto err_free;
 
 	ret = stm_file_assign(stmf, id->id, id->width);
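The width check above previously computed PAGE_SIZE / sw_mmiosz
unconditionally, which divides by zero on packet-based STM hardware that
sets sw_mmiosz to 0. With the fix the limit defaults to 1 and is derived
from sw_mmiosz only when it is nonzero: for example, 4096-byte pages with
256-byte channel windows give a width limit of 16, while sw_mmiosz == 0
now accepts exactly width 1 instead of faulting.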
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 45d6771..59c08d5 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -382,8 +382,10 @@
 	 * Check for the message size against FIFO depth and set the
 	 * 'hold bus' bit if it is greater than FIFO depth.
 	 */
-	if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
+	if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
 		ctrl_reg |= CDNS_I2C_CR_HOLD;
+	else
+		ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
 
 	cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
 
@@ -440,8 +442,11 @@
 	 * Check for the message size against FIFO depth and set the
 	 * 'hold bus' bit if it is greater than FIFO depth.
 	 */
-	if (id->send_count > CDNS_I2C_FIFO_DEPTH)
+	if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
 		ctrl_reg |= CDNS_I2C_CR_HOLD;
+	else
+		ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
+
 	cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
 
 	/* Clear the interrupts in interrupt status register. */
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 586e557..36100f4 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -794,7 +794,7 @@
 /* payload size is only 12 bit */
 static struct i2c_adapter_quirks tegra_i2c_quirks = {
 	.max_read_len = 4096,
-	.max_write_len = 4096,
+	.max_write_len = 4096 - 12,
 };
 
 static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 7484aac..80d82c6 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -3250,16 +3250,16 @@
 				   the underlying bus driver */
 		break;
 	case I2C_SMBUS_I2C_BLOCK_DATA:
+		if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
+			dev_err(&adapter->dev, "Invalid block %s size %d\n",
+				read_write == I2C_SMBUS_READ ? "read" : "write",
+				data->block[0]);
+			return -EINVAL;
+		}
 		if (read_write == I2C_SMBUS_READ) {
 			msg[1].len = data->block[0];
 		} else {
 			msg[0].len = data->block[0] + 1;
-			if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
-				dev_err(&adapter->dev,
-					"Invalid block write size %d\n",
-					data->block[0]);
-				return -EINVAL;
-			}
 			for (i = 1; i <= data->block[0]; i++)
 				msgbuf0[i] = data->block[i];
 		}
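Hoisting the length check ahead of the read/write split makes it cover both
directions: data->block[0] is capped at I2C_SMBUS_BLOCK_MAX (32), and an
oversized read length previously went into msg[1].len unchecked, letting
the returned data overrun the 34-byte msgbuf1. A hypothetical caller that
now gets -EINVAL instead of corrupting memory:

	union i2c_smbus_data data;
	s32 ret;

	data.block[0] = 64;	/* > I2C_SMBUS_BLOCK_MAX, rejected up front */
	ret = i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ, command,
			     I2C_SMBUS_I2C_BLOCK_DATA, &data);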
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5ded9b2..a6fa32c 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1107,14 +1107,14 @@
 	ICPU(INTEL_FAM6_WESTMERE,		idle_cpu_nehalem),
 	ICPU(INTEL_FAM6_WESTMERE_EP,		idle_cpu_nehalem),
 	ICPU(INTEL_FAM6_NEHALEM_EX,		idle_cpu_nehalem),
-	ICPU(INTEL_FAM6_ATOM_PINEVIEW,		idle_cpu_atom),
-	ICPU(INTEL_FAM6_ATOM_LINCROFT,		idle_cpu_lincroft),
+	ICPU(INTEL_FAM6_ATOM_BONNELL,		idle_cpu_atom),
+	ICPU(INTEL_FAM6_ATOM_BONNELL_MID,		idle_cpu_lincroft),
 	ICPU(INTEL_FAM6_WESTMERE_EX,		idle_cpu_nehalem),
 	ICPU(INTEL_FAM6_SANDYBRIDGE,		idle_cpu_snb),
 	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		idle_cpu_snb),
-	ICPU(INTEL_FAM6_ATOM_CEDARVIEW,		idle_cpu_atom),
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	idle_cpu_byt),
-	ICPU(INTEL_FAM6_ATOM_MERRIFIELD,	idle_cpu_tangier),
+	ICPU(INTEL_FAM6_ATOM_SALTWELL,		idle_cpu_atom),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT,	idle_cpu_byt),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID,	idle_cpu_tangier),
 	ICPU(INTEL_FAM6_ATOM_AIRMONT,		idle_cpu_cht),
 	ICPU(INTEL_FAM6_IVYBRIDGE,		idle_cpu_ivb),
 	ICPU(INTEL_FAM6_IVYBRIDGE_X,		idle_cpu_ivt),
@@ -1122,7 +1122,7 @@
 	ICPU(INTEL_FAM6_HASWELL_X,		idle_cpu_hsw),
 	ICPU(INTEL_FAM6_HASWELL_ULT,		idle_cpu_hsw),
 	ICPU(INTEL_FAM6_HASWELL_GT3E,		idle_cpu_hsw),
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT2,	idle_cpu_avn),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT_X,	idle_cpu_avn),
 	ICPU(INTEL_FAM6_BROADWELL_CORE,		idle_cpu_bdw),
 	ICPU(INTEL_FAM6_BROADWELL_GT3E,		idle_cpu_bdw),
 	ICPU(INTEL_FAM6_BROADWELL_X,		idle_cpu_bdw),
@@ -1134,7 +1134,7 @@
 	ICPU(INTEL_FAM6_SKYLAKE_X,		idle_cpu_skx),
 	ICPU(INTEL_FAM6_XEON_PHI_KNL,		idle_cpu_knl),
 	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		idle_cpu_bxt),
-	ICPU(INTEL_FAM6_ATOM_DENVERTON,		idle_cpu_dnv),
+	ICPU(INTEL_FAM6_ATOM_GOLDMONT_X,	idle_cpu_dnv),
 	{}
 };
 
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 7846368..780f886 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1340,6 +1340,8 @@
 
 	mutex_lock(&data->mutex);
 	ret = kxcjk1013_set_mode(data, OPERATION);
+	if (ret == 0)
+		ret = kxcjk1013_set_range(data, data->range);
 	mutex_unlock(&data->mutex);
 
 	return ret;
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 22c4c17..a1d072e 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -121,6 +121,7 @@
 	if (sigma_delta->info->has_registers) {
 		data[0] = reg << sigma_delta->info->addr_shift;
 		data[0] |= sigma_delta->info->read_mask;
+		data[0] |= sigma_delta->comm;
 		spi_message_add_tail(&t[0], &m);
 	}
 	spi_message_add_tail(&t[1], &m);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index e3e2155..dd9f228 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -704,23 +704,29 @@
 		ret = wait_event_interruptible_timeout(st->wq_data_avail,
 						       st->done,
 						       msecs_to_jiffies(1000));
-		if (ret == 0)
-			ret = -ETIMEDOUT;
-		if (ret < 0) {
-			mutex_unlock(&st->lock);
-			return ret;
-		}
 
-		*val = st->last_value;
-
+		/* Disable interrupts, regardless of whether the ADC
+		 * conversion was successful or not
+		 */
 		at91_adc_writel(st, AT91_ADC_CHDR,
 				AT91_ADC_CH(chan->channel));
 		at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
 
-		st->last_value = 0;
-		st->done = false;
+		if (ret > 0) {
+			/* a valid conversion took place */
+			*val = st->last_value;
+			st->last_value = 0;
+			st->done = false;
+			ret = IIO_VAL_INT;
+		} else if (ret == 0) {
+			/* conversion timeout */
+			dev_err(&idev->dev, "ADC Channel %d timeout.\n",
+				chan->channel);
+			ret = -ETIMEDOUT;
+		}
+
 		mutex_unlock(&st->lock);
-		return IIO_VAL_INT;
+		return ret;
 
 	case IIO_CHAN_INFO_SCALE:
 		*val = st->vref_mv;
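The rework above hinges on the three-way return of
wait_event_interruptible_timeout(): positive means the condition became
true (the value is the jiffies left), zero means the timeout expired with
the condition still false, and negative means a signal arrived
(-ERESTARTSYS). Interrupts are now disabled on every one of those paths,
and only the positive path publishes last_value. A sketch of the
convention, with consume_result() an assumed name:

	long ret = wait_event_interruptible_timeout(wq, cond,
						    msecs_to_jiffies(1000));
	if (ret > 0)
		consume_result();	/* condition true before the timeout */
	else if (ret == 0)
		ret = -ETIMEDOUT;	/* timer expired, condition still false */
	/* else ret is -ERESTARTSYS: a signal arrived first */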
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index c15756d..eb8b735c 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -916,7 +916,7 @@
 	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
 	struct exynos_adc *info = iio_priv(indio_dev);
 
-	if (IS_REACHABLE(CONFIG_INPUT)) {
+	if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
 		free_irq(info->tsirq, info);
 		input_unregister_device(info->input);
 	}
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 02dfbf8..0e6de1e 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -767,8 +767,7 @@
 
 		if (((prop->channel == RR_ADC_CHG_TEMP) ||
 			(prop->channel == RR_ADC_SKIN_TEMP) ||
-			(prop->channel == RR_ADC_USBIN_I) ||
-			(prop->channel == RR_ADC_DIE_TEMP)) &&
+			(prop->channel == RR_ADC_USBIN_I)) &&
 					((!rradc_is_usb_present(chip)))) {
 			pr_debug("USB not present for %d\n", prop->channel);
 			rc = -ENODATA;
@@ -938,6 +937,30 @@
 			goto fail;
 		}
 		break;
+	case RR_ADC_DIE_TEMP:
+		/* Force conversion every cycle */
+		rc = rradc_masked_write(chip, FG_ADC_RR_PMI_DIE_TEMP_TRIGGER,
+				FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK,
+				FG_ADC_RR_USB_IN_V_EVERY_CYCLE);
+		if (rc < 0) {
+			pr_err("Force every cycle update failed:%d\n", rc);
+			goto fail;
+		}
+
+		rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
+		if (rc < 0) {
+			pr_err("Error reading in continuous mode:%d\n", rc);
+			goto fail;
+		}
+
+		/* Restore aux_therm trigger */
+		rc = rradc_masked_write(chip, FG_ADC_RR_PMI_DIE_TEMP_TRIGGER,
+				FG_ADC_RR_USB_IN_V_EVERY_CYCLE_MASK, 0);
+		if (rc < 0) {
+			pr_err("Restore every cycle update failed:%d\n", rc);
+			goto fail;
+		}
+		break;
 	case RR_ADC_CHG_HOT_TEMP:
 	case RR_ADC_CHG_TOO_HOT_TEMP:
 	case RR_ADC_SKIN_HOT_TEMP:
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 56cf590..143894a 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1299,7 +1299,7 @@
 	}
 	free_irq(irq, indio_dev);
 	clk_disable_unprepare(xadc->clk);
-	cancel_delayed_work(&xadc->zynq_unmask_work);
+	cancel_delayed_work_sync(&xadc->zynq_unmask_work);
 	kfree(xadc->data);
 	kfree(indio_dev->channels);
 
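cancel_delayed_work() only deletes a pending timer; a handler that is
already executing keeps running, so the unmask work could still touch xadc
state after the kfree() calls that follow it. cancel_delayed_work_sync()
additionally waits for an in-flight handler to finish, which is why the
same substitution appears in the mlx4 alias-GUID teardown below. The
ordering it guarantees, as a sketch:

	cancel_delayed_work_sync(&xadc->zynq_unmask_work); /* waits if running */
	kfree(xadc->data);	/* safe: no handler can still reference it */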
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 821919d..b5a5517 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -583,11 +583,10 @@
 	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
 		return bmg160_get_filter(data, val);
 	case IIO_CHAN_INFO_SCALE:
-		*val = 0;
 		switch (chan->type) {
 		case IIO_TEMP:
-			*val2 = 500000;
-			return IIO_VAL_INT_PLUS_MICRO;
+			*val = 500;
+			return IIO_VAL_INT;
 		case IIO_ANGL_VEL:
 		{
 			int i;
@@ -595,6 +594,7 @@
 			for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
 				if (bmg160_scale_table[i].dps_range ==
 							data->dps_range) {
+					*val = 0;
 					*val2 = bmg160_scale_table[i].scale;
 					return IIO_VAL_INT_PLUS_MICRO;
 				}
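IIO reports scales as a (val, val2) pair: IIO_VAL_INT exposes the integer
val alone, while IIO_VAL_INT_PLUS_MICRO exposes val + val2/1000000. The
temperature fix above moves the scale to millidegree units, so it becomes
the exact integer 500 and IIO_VAL_INT replaces the fractional 0.500000
encoding; *val = 0 is now set only inside the angular-velocity branch that
genuinely needs the fractional form. The two encodings side by side, as a
sketch rather than real control flow:

	*val = 500;
	return IIO_VAL_INT;		/* userspace reads "500" */

	*val = 0;
	*val2 = 500000;
	return IIO_VAL_INT_PLUS_MICRO;	/* userspace reads "0.500000" */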
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index dd18b74..a2322b2 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1872,8 +1872,10 @@
 	}
 	mutex_unlock(&ep->com.mutex);
 
-	if (release)
+	if (release) {
+		close_complete_upcall(ep, -ECONNRESET);
 		release_ep_resources(ep);
+	}
 	c4iw_put_ep(&ep->com);
 	return 0;
 }
@@ -3567,7 +3569,6 @@
 	if (close) {
 		if (abrupt) {
 			set_bit(EP_DISC_ABORT, &ep->com.history);
-			close_complete_upcall(ep, -ECONNRESET);
 			ret = send_abort(ep);
 		} else {
 			set_bit(EP_DISC_CLOSE, &ep->com.history);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index e8e0fa5..b087866 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2394,7 +2394,7 @@
 			update_ack_queue(qp, next);
 		}
 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
-		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+		if (e->rdma_sge.mr) {
 			rvt_put_mr(e->rdma_sge.mr);
 			e->rdma_sge.mr = NULL;
 		}
@@ -2469,7 +2469,7 @@
 			update_ack_queue(qp, next);
 		}
 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
-		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+		if (e->rdma_sge.mr) {
 			rvt_put_mr(e->rdma_sge.mr);
 			e->rdma_sge.mr = NULL;
 		}
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 5e99390..ec13884 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -805,8 +805,8 @@
 	unsigned long flags;
 
 	for (i = 0 ; i < dev->num_ports; i++) {
-		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
 		det = &sriov->alias_guid.ports_guid[i];
+		cancel_delayed_work_sync(&det->alias_guid_work);
 		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
 		while (!list_empty(&det->cb_list)) {
 			cb_ctx = list_entry(det->cb_list.next,
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 39a4888..5dc920f 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -39,7 +39,7 @@
 
 #include "mlx4_ib.h"
 
-#define CM_CLEANUP_CACHE_TIMEOUT  (5 * HZ)
+#define CM_CLEANUP_CACHE_TIMEOUT  (30 * HZ)
 
 struct id_map_entry {
 	struct rb_node node;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 46b6497..49d55a0 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -497,11 +497,6 @@
 	if (unlikely(mapped_segs == mr->mr.max_segs))
 		return -ENOMEM;
 
-	if (mr->mr.length == 0) {
-		mr->mr.user_base = addr;
-		mr->mr.iova = addr;
-	}
-
 	m = mapped_segs / RVT_SEGSZ;
 	n = mapped_segs % RVT_SEGSZ;
 	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -518,17 +513,24 @@
  * @sg_nents: number of entries in sg
  * @sg_offset: offset in bytes into sg
  *
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
  * Return: number of sg elements mapped to the memory region
  */
 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		  int sg_nents, unsigned int *sg_offset)
 {
 	struct rvt_mr *mr = to_imr(ibmr);
+	int ret;
 
 	mr->mr.length = 0;
 	mr->mr.page_shift = PAGE_SHIFT;
-	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
-			      rvt_set_page);
+	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+	mr->mr.user_base = ibmr->iova;
+	mr->mr.iova = ibmr->iova;
+	mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+	mr->mr.length = (size_t)ibmr->length;
+	return ret;
 }
 
 /**
@@ -559,6 +561,7 @@
 	ibmr->rkey = key;
 	mr->mr.lkey = key;
 	mr->mr.access_flags = access;
+	mr->mr.iova = ibmr->iova;
 	atomic_set(&mr->mr.lkey_invalid, 0);
 
 	return 0;
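ib_sg_to_pages() invokes rvt_set_page() once per page and, on return,
leaves ibmr->iova and ibmr->length describing the mapped range, so the MR
bookkeeping (user_base, iova, offset, length) can be derived from those
outputs. Seeding user_base and iova inside the per-page callback, as the
removed rvt_set_page() hunk did, captured a page-aligned address rather
than the true start of the region whenever the mapping began mid-page.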
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 1c206df..827dc57 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2368,8 +2368,19 @@
 	srpt_queue_response(cmd);
 }
 
+/*
+ * This function is called for aborted commands if no response is sent to the
+ * initiator. Make sure that the credits freed by aborting a command are
+ * returned to the initiator the next time a response is sent by incrementing
+ * ch->req_lim_delta.
+ */
 static void srpt_aborted_task(struct se_cmd *cmd)
 {
+	struct srpt_send_ioctx *ioctx = container_of(cmd,
+				struct srpt_send_ioctx, cmd);
+	struct srpt_rdma_ch *ch = ioctx->ch;
+
+	atomic_inc(&ch->req_lim_delta);
 }
 
 static int srpt_queue_status(struct se_cmd *cmd)
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 4401be2..3c53aa5 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -75,9 +75,7 @@
 struct cap11xx_led {
 	struct cap11xx_priv *priv;
 	struct led_classdev cdev;
-	struct work_struct work;
 	u32 reg;
-	enum led_brightness new_brightness;
 };
 #endif
 
@@ -233,30 +231,21 @@
 }
 
 #ifdef CONFIG_LEDS_CLASS
-static void cap11xx_led_work(struct work_struct *work)
-{
-	struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
-	struct cap11xx_priv *priv = led->priv;
-	int value = led->new_brightness;
-
-	/*
-	 * All LEDs share the same duty cycle as this is a HW limitation.
-	 * Brightness levels per LED are either 0 (OFF) and 1 (ON).
-	 */
-	regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
-				BIT(led->reg), value ? BIT(led->reg) : 0);
-}
-
-static void cap11xx_led_set(struct led_classdev *cdev,
-			   enum led_brightness value)
+static int cap11xx_led_set(struct led_classdev *cdev,
+			    enum led_brightness value)
 {
 	struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
+	struct cap11xx_priv *priv = led->priv;
 
-	if (led->new_brightness == value)
-		return;
-
-	led->new_brightness = value;
-	schedule_work(&led->work);
+	/*
+	 * All LEDs share the same duty cycle as this is a HW
+	 * limitation. Brightness levels per LED are either
+	 * 0 (OFF) or 1 (ON).
+	 */
+	return regmap_update_bits(priv->regmap,
+				  CAP11XX_REG_LED_OUTPUT_CONTROL,
+				  BIT(led->reg),
+				  value ? BIT(led->reg) : 0);
 }
 
 static int cap11xx_init_leds(struct device *dev,
@@ -299,7 +288,7 @@
 		led->cdev.default_trigger =
 			of_get_property(child, "linux,default-trigger", NULL);
 		led->cdev.flags = 0;
-		led->cdev.brightness_set = cap11xx_led_set;
+		led->cdev.brightness_set_blocking = cap11xx_led_set;
 		led->cdev.max_brightness = 1;
 		led->cdev.brightness = LED_OFF;
 
@@ -312,8 +301,6 @@
 		led->reg = reg;
 		led->priv = priv;
 
-		INIT_WORK(&led->work, cap11xx_led_work);
-
 		error = devm_led_classdev_register(dev, &led->cdev);
 		if (error) {
 			of_node_put(child);
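brightness_set_blocking lets the LED core call the driver from a context
where it may sleep (the core defers it internally), so the private
workqueue and the cached new_brightness value become unnecessary and the
regmap I2C write can happen inline. The callback contract, a sketch where
demo_led_set() and do_bus_write() are assumed names:

	static int demo_led_set(struct led_classdev *cdev,
				enum led_brightness value)
	{
		/* may sleep; must return 0 or a negative error */
		return do_bus_write(value != LED_OFF);
	}

	/* registered as: cdev->brightness_set_blocking = demo_led_set; */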
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index c64d874..2e12e31 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -220,7 +220,7 @@
 	keypad->stopped = true;
 	spin_unlock_irq(&keypad->lock);
 
-	flush_work(&keypad->work.work);
+	flush_delayed_work(&keypad->work);
 	/*
 	 * matrix_keypad_scan() will leave IRQs enabled;
 	 * we should disable them now.
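flush_work(&keypad->work.work) only waits for a work item that is already
queued; while the delayed-work timer is still pending the flush returns
immediately, and the scan can then run after 'stopped' was set.
flush_delayed_work() first forces a pending timer to queue the work and
then flushes it, closing that window. The race, sketched:

	/* schedule_delayed_work(&kp->work, delay);  timer armed, not queued
	 * flush_work(&kp->work.work);               nothing queued: no-op
	 * ...timer fires later, scan runs after stop()...
	 * flush_delayed_work(&kp->work) queues the pending timer, then waits.
	 */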
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 7544888..b8dbde7 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -156,6 +156,9 @@
 		return error;
 	}
 
+	pdata->input = input;
+	platform_set_drvdata(pdev, pdata);
+
 	error = devm_request_irq(&pdev->dev, pdata->irq,
 			       imx_snvs_pwrkey_interrupt,
 			       0, pdev->name, pdev);
@@ -171,9 +174,6 @@
 		return error;
 	}
 
-	pdata->input = input;
-	platform_set_drvdata(pdev, pdata);
-
 	device_init_wakeup(&pdev->dev, pdata->wakeup);
 
 	return 0;
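An interrupt can be delivered the moment devm_request_irq() returns, and
imx_snvs_pwrkey_interrupt() fetches its state via platform_get_drvdata(),
so the drvdata and pdata->input must be published beforehand. The same
rule drives the st-keyscan hunk below for its input_dev pointer. The
ordering, sketched with isr_fn as an assumed name:

	/* Publish everything the handler dereferences, then request the IRQ. */
	platform_set_drvdata(pdev, pdata);
	error = devm_request_irq(&pdev->dev, pdata->irq, isr_fn,
				 0, pdev->name, pdev);	/* may fire now */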
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index de7be4f..ebf9f64 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -153,6 +153,8 @@
 
 	input_dev->id.bustype = BUS_HOST;
 
+	keypad_data->input_dev = input_dev;
+
 	error = keypad_matrix_key_parse_dt(keypad_data);
 	if (error)
 		return error;
@@ -168,8 +170,6 @@
 
 	input_set_drvdata(input_dev, keypad_data);
 
-	keypad_data->input_dev = input_dev;
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(keypad_data->base))
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
old mode 100644
new mode 100755
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index c08ee8f..bd95a6a 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -42,7 +42,6 @@
 obj-$(CONFIG_HP_SDC_RTC)		+= hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IMS_PCU)		+= ims-pcu.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)	+= ixp4xx-beeper.o
-obj-$(CONFIG_INPUT_KEYCHORD)		+= keychord.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)	+= keyspan_remote.o
 obj-$(CONFIG_INPUT_KXTJ9)		+= kxtj9.o
 obj-$(CONFIG_INPUT_M68K_BEEP)		+= m68kspkr.o
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
deleted file mode 100644
index 82fefdf..0000000
--- a/drivers/input/misc/keychord.c
+++ /dev/null
@@ -1,467 +0,0 @@
-/*
- *  drivers/input/misc/keychord.c
- *
- * Copyright (C) 2008 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
-*/
-
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/keychord.h>
-#include <linux/sched.h>
-
-#define KEYCHORD_NAME		"keychord"
-#define BUFFER_SIZE			16
-
-MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
-MODULE_DESCRIPTION("Key chord input driver");
-MODULE_SUPPORTED_DEVICE("keychord");
-MODULE_LICENSE("GPL");
-
-#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
-		((char *)kc + sizeof(struct input_keychord) + \
-		kc->count * sizeof(kc->keycodes[0])))
-
-struct keychord_device {
-	struct input_handler	input_handler;
-	int			registered;
-
-	/* list of keychords to monitor */
-	struct input_keychord	*keychords;
-	int			keychord_count;
-
-	/* bitmask of keys contained in our keychords */
-	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
-	/* current state of the keys */
-	unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
-	/* number of keys that are currently pressed */
-	int key_down;
-
-	/* second input_device_id is needed for null termination */
-	struct input_device_id  device_ids[2];
-
-	spinlock_t		lock;
-	wait_queue_head_t	waitq;
-	unsigned char		head;
-	unsigned char		tail;
-	__u16			buff[BUFFER_SIZE];
-	/* Bit to serialize writes to this device */
-#define KEYCHORD_BUSY			0x01
-	unsigned long		flags;
-	wait_queue_head_t	write_waitq;
-};
-
-static int check_keychord(struct keychord_device *kdev,
-		struct input_keychord *keychord)
-{
-	int i;
-
-	if (keychord->count != kdev->key_down)
-		return 0;
-
-	for (i = 0; i < keychord->count; i++) {
-		if (!test_bit(keychord->keycodes[i], kdev->keystate))
-			return 0;
-	}
-
-	/* we have a match */
-	return 1;
-}
-
-static void keychord_event(struct input_handle *handle, unsigned int type,
-			   unsigned int code, int value)
-{
-	struct keychord_device *kdev = handle->private;
-	struct input_keychord *keychord;
-	unsigned long flags;
-	int i, got_chord = 0;
-
-	if (type != EV_KEY || code >= KEY_MAX)
-		return;
-
-	spin_lock_irqsave(&kdev->lock, flags);
-	/* do nothing if key state did not change */
-	if (!test_bit(code, kdev->keystate) == !value)
-		goto done;
-	__change_bit(code, kdev->keystate);
-	if (value)
-		kdev->key_down++;
-	else
-		kdev->key_down--;
-
-	/* don't notify on key up */
-	if (!value)
-		goto done;
-	/* ignore this event if it is not one of the keys we are monitoring */
-	if (!test_bit(code, kdev->keybit))
-		goto done;
-
-	keychord = kdev->keychords;
-	if (!keychord)
-		goto done;
-
-	/* check to see if the keyboard state matches any keychords */
-	for (i = 0; i < kdev->keychord_count; i++) {
-		if (check_keychord(kdev, keychord)) {
-			kdev->buff[kdev->head] = keychord->id;
-			kdev->head = (kdev->head + 1) % BUFFER_SIZE;
-			got_chord = 1;
-			break;
-		}
-		/* skip to next keychord */
-		keychord = NEXT_KEYCHORD(keychord);
-	}
-
-done:
-	spin_unlock_irqrestore(&kdev->lock, flags);
-
-	if (got_chord) {
-		pr_info("keychord: got keychord id %d. Any tasks: %d\n",
-			keychord->id,
-			!list_empty_careful(&kdev->waitq.task_list));
-		wake_up_interruptible(&kdev->waitq);
-	}
-}
-
-static int keychord_connect(struct input_handler *handler,
-					  struct input_dev *dev,
-					  const struct input_device_id *id)
-{
-	int i, ret;
-	struct input_handle *handle;
-	struct keychord_device *kdev =
-		container_of(handler, struct keychord_device, input_handler);
-
-	/*
-	 * ignore this input device if it does not contain any keycodes
-	 * that we are monitoring
-	 */
-	for (i = 0; i < KEY_MAX; i++) {
-		if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
-			break;
-	}
-	if (i == KEY_MAX)
-		return -ENODEV;
-
-	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-	if (!handle)
-		return -ENOMEM;
-
-	handle->dev = dev;
-	handle->handler = handler;
-	handle->name = KEYCHORD_NAME;
-	handle->private = kdev;
-
-	ret = input_register_handle(handle);
-	if (ret)
-		goto err_input_register_handle;
-
-	ret = input_open_device(handle);
-	if (ret)
-		goto err_input_open_device;
-
-	pr_info("keychord: using input dev %s for fevent\n", dev->name);
-	return 0;
-
-err_input_open_device:
-	input_unregister_handle(handle);
-err_input_register_handle:
-	kfree(handle);
-	return ret;
-}
-
-static void keychord_disconnect(struct input_handle *handle)
-{
-	input_close_device(handle);
-	input_unregister_handle(handle);
-	kfree(handle);
-}
-
-/*
- * keychord_read is used to read keychord events from the driver
- */
-static ssize_t keychord_read(struct file *file, char __user *buffer,
-		size_t count, loff_t *ppos)
-{
-	struct keychord_device *kdev = file->private_data;
-	__u16   id;
-	int retval;
-	unsigned long flags;
-
-	if (count < sizeof(id))
-		return -EINVAL;
-	count = sizeof(id);
-
-	if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
-		return -EAGAIN;
-
-	retval = wait_event_interruptible(kdev->waitq,
-			kdev->head != kdev->tail);
-	if (retval)
-		return retval;
-
-	spin_lock_irqsave(&kdev->lock, flags);
-	/* pop a keychord ID off the queue */
-	id = kdev->buff[kdev->tail];
-	kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
-	spin_unlock_irqrestore(&kdev->lock, flags);
-
-	if (copy_to_user(buffer, &id, count))
-		return -EFAULT;
-
-	return count;
-}
-
-/*
- * serializes writes on a device. can use mutex_lock_interruptible()
- * for this particular use case as well - a matter of preference.
- */
-static int
-keychord_write_lock(struct keychord_device *kdev)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&kdev->lock, flags);
-	while (kdev->flags & KEYCHORD_BUSY) {
-		spin_unlock_irqrestore(&kdev->lock, flags);
-		ret = wait_event_interruptible(kdev->write_waitq,
-			       ((kdev->flags & KEYCHORD_BUSY) == 0));
-		if (ret)
-			return ret;
-		spin_lock_irqsave(&kdev->lock, flags);
-	}
-	kdev->flags |= KEYCHORD_BUSY;
-	spin_unlock_irqrestore(&kdev->lock, flags);
-	return 0;
-}
-
-static void
-keychord_write_unlock(struct keychord_device *kdev)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&kdev->lock, flags);
-	kdev->flags &= ~KEYCHORD_BUSY;
-	spin_unlock_irqrestore(&kdev->lock, flags);
-	wake_up_interruptible(&kdev->write_waitq);
-}
-
-/*
- * keychord_write is used to configure the driver
- */
-static ssize_t keychord_write(struct file *file, const char __user *buffer,
-		size_t count, loff_t *ppos)
-{
-	struct keychord_device *kdev = file->private_data;
-	struct input_keychord *keychords = 0;
-	struct input_keychord *keychord;
-	int ret, i, key;
-	unsigned long flags;
-	size_t resid = count;
-	size_t key_bytes;
-
-	if (count < sizeof(struct input_keychord) || count > PAGE_SIZE)
-		return -EINVAL;
-	keychords = kzalloc(count, GFP_KERNEL);
-	if (!keychords)
-		return -ENOMEM;
-
-	/* read list of keychords from userspace */
-	if (copy_from_user(keychords, buffer, count)) {
-		kfree(keychords);
-		return -EFAULT;
-	}
-
-	/*
-	 * Serialize writes to this device to prevent various races.
-	 * 1) writers racing here could do duplicate input_unregister_handler()
-	 *    calls, resulting in attempting to unlink a node from a list that
-	 *    does not exist.
-	 * 2) writers racing here could do duplicate input_register_handler() calls
-	 *    below, resulting in a duplicate insertion of a node into the list.
-	 * 3) a double kfree of keychords can occur (in the event that
-	 *    input_register_handler() fails below.
-	 */
-	ret = keychord_write_lock(kdev);
-	if (ret) {
-		kfree(keychords);
-		return ret;
-	}
-
-	/* unregister handler before changing configuration */
-	if (kdev->registered) {
-		input_unregister_handler(&kdev->input_handler);
-		kdev->registered = 0;
-	}
-
-	spin_lock_irqsave(&kdev->lock, flags);
-	/* clear any existing configuration */
-	kfree(kdev->keychords);
-	kdev->keychords = 0;
-	kdev->keychord_count = 0;
-	kdev->key_down = 0;
-	memset(kdev->keybit, 0, sizeof(kdev->keybit));
-	memset(kdev->keystate, 0, sizeof(kdev->keystate));
-	kdev->head = kdev->tail = 0;
-
-	keychord = keychords;
-
-	while (resid > 0) {
-		/* Is the entire keychord entry header present ? */
-		if (resid < sizeof(struct input_keychord)) {
-			pr_err("keychord: Insufficient bytes present for header %zu\n",
-			       resid);
-			goto err_unlock_return;
-		}
-		resid -= sizeof(struct input_keychord);
-		if (keychord->count <= 0) {
-			pr_err("keychord: invalid keycode count %d\n",
-				keychord->count);
-			goto err_unlock_return;
-		}
-		key_bytes = keychord->count * sizeof(keychord->keycodes[0]);
-		/* Do we have all the expected keycodes ? */
-		if (resid < key_bytes) {
-			pr_err("keychord: Insufficient bytes present for keycount %zu\n",
-			       resid);
-			goto err_unlock_return;
-		}
-		resid -= key_bytes;
-
-		if (keychord->version != KEYCHORD_VERSION) {
-			pr_err("keychord: unsupported version %d\n",
-				keychord->version);
-			goto err_unlock_return;
-		}
-
-		/* keep track of the keys we are monitoring in keybit */
-		for (i = 0; i < keychord->count; i++) {
-			key = keychord->keycodes[i];
-			if (key < 0 || key >= KEY_CNT) {
-				pr_err("keychord: keycode %d out of range\n",
-					key);
-				goto err_unlock_return;
-			}
-			__set_bit(key, kdev->keybit);
-		}
-
-		kdev->keychord_count++;
-		keychord = NEXT_KEYCHORD(keychord);
-	}
-
-	kdev->keychords = keychords;
-	spin_unlock_irqrestore(&kdev->lock, flags);
-
-	ret = input_register_handler(&kdev->input_handler);
-	if (ret) {
-		kfree(keychords);
-		kdev->keychords = 0;
-		keychord_write_unlock(kdev);
-		return ret;
-	}
-	kdev->registered = 1;
-
-	keychord_write_unlock(kdev);
-
-	return count;
-
-err_unlock_return:
-	spin_unlock_irqrestore(&kdev->lock, flags);
-	kfree(keychords);
-	keychord_write_unlock(kdev);
-	return -EINVAL;
-}
-
-static unsigned int keychord_poll(struct file *file, poll_table *wait)
-{
-	struct keychord_device *kdev = file->private_data;
-
-	poll_wait(file, &kdev->waitq, wait);
-
-	if (kdev->head != kdev->tail)
-		return POLLIN | POLLRDNORM;
-
-	return 0;
-}
-
-static int keychord_open(struct inode *inode, struct file *file)
-{
-	struct keychord_device *kdev;
-
-	kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL);
-	if (!kdev)
-		return -ENOMEM;
-
-	spin_lock_init(&kdev->lock);
-	init_waitqueue_head(&kdev->waitq);
-	init_waitqueue_head(&kdev->write_waitq);
-
-	kdev->input_handler.event = keychord_event;
-	kdev->input_handler.connect = keychord_connect;
-	kdev->input_handler.disconnect = keychord_disconnect;
-	kdev->input_handler.name = KEYCHORD_NAME;
-	kdev->input_handler.id_table = kdev->device_ids;
-
-	kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
-	__set_bit(EV_KEY, kdev->device_ids[0].evbit);
-
-	file->private_data = kdev;
-
-	return 0;
-}
-
-static int keychord_release(struct inode *inode, struct file *file)
-{
-	struct keychord_device *kdev = file->private_data;
-
-	if (kdev->registered)
-		input_unregister_handler(&kdev->input_handler);
-	kfree(kdev->keychords);
-	kfree(kdev);
-
-	return 0;
-}
-
-static const struct file_operations keychord_fops = {
-	.owner		= THIS_MODULE,
-	.open		= keychord_open,
-	.release	= keychord_release,
-	.read		= keychord_read,
-	.write		= keychord_write,
-	.poll		= keychord_poll,
-};
-
-static struct miscdevice keychord_misc = {
-	.fops		= &keychord_fops,
-	.name		= KEYCHORD_NAME,
-	.minor		= MISC_DYNAMIC_MINOR,
-};
-
-static int __init keychord_init(void)
-{
-	return misc_register(&keychord_misc);
-}
-
-static void __exit keychord_exit(void)
-{
-	misc_deregister(&keychord_misc);
-}
-
-module_init(keychord_init);
-module_exit(keychord_exit);
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4a88312..65038dc 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -772,7 +772,7 @@
 
 	error = rmi_register_function(fn);
 	if (error)
-		goto err_put_fn;
+		return error;
 
 	if (pdt->function_number == 0x01)
 		data->f01_container = fn;
@@ -780,10 +780,6 @@
 	list_add_tail(&fn->node, &data->function_list);
 
 	return RMI_SCAN_CONTINUE;
-
-err_put_fn:
-	put_device(&fn->dev);
-	return error;
 }
 
 int rmi_driver_suspend(struct rmi_device *rmi_dev)
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index f798f42..275f957 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -1198,7 +1198,7 @@
 		ctrl->ctrl0_11[11] = ctrl->ctrl0_11[11] & ~BIT(0);
 
 	rc = f11_write_control_regs(fn, &f11->sens_query,
-			   &f11->dev_controls, fn->fd.query_base_addr);
+			   &f11->dev_controls, fn->fd.control_base_addr);
 	if (rc)
 		dev_warn(&fn->dev, "Failed to write control registers\n");
 
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index a259ef3..c9c8c50 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -238,6 +238,137 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called cyttsp4_spi.
 
+config TOUCHSCREEN_CYPRESS_CYTTSP5
+	tristate "Parade TrueTouch Gen5 Touchscreen Driver"
+	default y
+	help
+	  Core driver for Parade TrueTouch(tm) Standard Product
+	  Generation5 touchscreen controllers.
+
+	  Say Y here if you have a Parade Gen5 touchscreen.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cyttsp5.
+
+config TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	bool "Enable Device Tree support"
+	depends on TOUCHSCREEN_CYPRESS_CYTTSP5 && OF
+	default y
+	help
+	  Devicetree parser for Parade TrueTouch(tm) Standard Product
+	  Generation5 touchscreen controllers. Say Y here to enable
+	  support for device tree.
+
+	  If unsure, say N.
+
+config TOUCHSCREEN_CYPRESS_CYTTSP5_I2C
+	tristate "Parade TrueTouch Gen5 I2C"
+	depends on TOUCHSCREEN_CYPRESS_CYTTSP5
+	select I2C
+	default y
+	help
+	  Say Y here to enable I2C bus interface to Parade
+	  TrueTouch(tm) Standard Product Generation5 touchscreen
+	  controller.
+
+	  If unsure, say Y.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cyttsp5_i2c.
+
+config TOUCHSCREEN_CYPRESS_CYTTSP5_SPI
+	tristate "Parade TrueTouch Gen5 SPI"
+	depends on TOUCHSCREEN_CYPRESS_CYTTSP5
+	select SPI
+	help
+	  Say Y here to enable SPI bus interface to Parade
+	  TrueTouch(tm) Standard Product Generation5 touchscreen
+	  controller.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cyttsp5_spi.
+
+config TOUCHSCREEN_CYPRESS_CYTTSP5_MT_A
+	bool "Parade TrueTouch Gen5 MultiTouch Protocol A"
+	depends on TOUCHSCREEN_CYPRESS_CYTTSP5
+	help
+	  This option controls which MultiTouch protocol will be
+	  used to report the touch events. This depends on your
+	  usage.
+
+	  Select to enable MultiTouch touch reporting using protocol A
+	  on Parade TrueTouch(tm) Standard Product Generation5 touchscreen
+	  controller.
+
+	  If unsure, say N.
+
+config TOUCHSCREEN_CYPRESS_CYTTSP5_MT_B
+	bool "Parade TrueTouch Gen5 MultiTouch Protocol B"
+	depends on TOUCHSCREEN_CYPRESS_CYTTSP5
+	default y
+	help
+	  This option controls which MultiTouch protocol will be
+	  used to report the touch events. This depends on your
+	  usage.
+
+	  Select to enable MultiTouch touch reporting using protocol B
+	  on Parade TrueTouch(tm) Standard Product Generation5 touchscreen
+	  controller.
+
+	  If unsure, say N.
+
+config TOUCHSCREEN_CYPRESS_CYTTSP5_BUTTON
+	bool "Parade TrueTouch Gen5 MultiTouch CapSense Button"
+	depends on TOUCHSCREEN_CYPRESS_CYTTSP5
+	default y
+	help
+	  Say Y here to enable CapSense reporting on Parade
+	  TrueTouch(tm) Standard Product Generation5 touchscreen
+	  controller.
+
+	  If unsure, say N.
+
+config TOUCHSCREEN_CYPRESS_CYTTSP5_PROXIMITY
+	bool "Parade TrueTouch Gen5 Proximity"
+	depends on TOUCHSCREEN_CYPRESS_CYTTSP5
+	default y
+	help
+	  Say Y here to enable proximity reporting on Parade
+	  TrueTouch(tm) Standard Product Generation5 touchscreen
+	  controller.
+
+	  If unsure, say N.
+
+config TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICE_ACCESS
+	tristate "Parade TrueTouch Gen5 MultiTouch Device Access"
+	depends on TOUCHSCREEN_CYPRESS_CYTTSP5
+	default y
+	help
+	  Say Y here to enable Parade TrueTouch(tm) Standard Product
+	  Generation5 touchscreen controller device access module.
+
+	  This module adds an interface to access the touchscreen
+	  controller using driver sysfs nodes.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cyttsp5_device_access.
+
+config TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICE_ACCESS_API
+	bool "Enable Device Access kernel API"
+	depends on TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICE_ACCESS
+	help
+	  Say Y here to enable Device access kernel API which provides
+	  access to Parade TrueTouch(tm) Standard Product Generation5
+	  touchscreen controller for other modules.
+
+	  If unsure, say N.
+
 config TOUCHSCREEN_DA9034
 	tristate "Touchscreen support for Dialog Semiconductor DA9034"
 	depends on PMIC_DA903X
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 2579346..a76b17f 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -25,6 +25,7 @@
 obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_CORE)	+= cyttsp4_core.o
 obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_I2C)	+= cyttsp4_i2c.o cyttsp_i2c_common.o
 obj-$(CONFIG_TOUCHSCREEN_CYTTSP4_SPI)	+= cyttsp4_spi.o
+obj-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5)	+= cyttsp5/
 obj-$(CONFIG_TOUCHSCREEN_DA9034)	+= da9034-ts.o
 obj-$(CONFIG_TOUCHSCREEN_DA9052)	+= da9052_tsi.o
 obj-$(CONFIG_TOUCHSCREEN_DYNAPRO)	+= dynapro.o
diff --git a/drivers/input/touchscreen/cyttsp5/Makefile b/drivers/input/touchscreen/cyttsp5/Makefile
new file mode 100644
index 0000000..85bb810
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/Makefile
@@ -0,0 +1,49 @@
+obj-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5) += cyttsp5.o
+cyttsp5-y := cyttsp5_core.o cyttsp5_mt_common.o
+cyttsp5-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MT_A) += cyttsp5_mta.o
+cyttsp5-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MT_B) += cyttsp5_mtb.o
+cyttsp5-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BUTTON) += cyttsp5_btn.o
+cyttsp5-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PROXIMITY) += cyttsp5_proximity.o
+obj-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT) += cyttsp5_devtree.o
+ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5
+obj-y += cyttsp5_platform.o
+endif
+obj-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_I2C) += cyttsp5_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_SPI) += cyttsp5_spi.o
+obj-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEBUG_MDL) += cyttsp5_debug.o
+obj-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5) += cyttsp5_loader.o
+obj-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICE_ACCESS) += cyttsp5_device_access.o
+obj-$(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_TEST_DEVICE_ACCESS_API) += cyttsp5_test_device_access_api.o
+
+ifeq ($(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEBUG),y)
+CFLAGS_cyttsp5_core.o += -DDEBUG
+CFLAGS_cyttsp5_i2c.o += -DDEBUG
+CFLAGS_cyttsp5_spi.o += -DDEBUG
+CFLAGS_cyttsp5_mta.o += -DDEBUG
+CFLAGS_cyttsp5_mtb.o += -DDEBUG
+CFLAGS_cyttsp5_mt_common.o += -DDEBUG
+CFLAGS_cyttsp5_btn.o += -DDEBUG
+CFLAGS_cyttsp5_proximity.o += -DDEBUG
+CFLAGS_cyttsp5_device_access.o += -DDEBUG
+CFLAGS_cyttsp5_loader.o += -DDEBUG
+CFLAGS_cyttsp5_debug.o += -DDEBUG
+CFLAGS_cyttsp5_devtree.o += -DDEBUG
+CFLAGS_cyttsp5_platform.o += -DDEBUG
+endif
+ifeq ($(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_VDEBUG),y)
+CFLAGS_cyttsp5_core.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_i2c.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_spi.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_mta.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_mtb.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_mt_common.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_btn.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_proximity.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_device_access.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_loader.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_debug.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_devtree.o += -DVERBOSE_DEBUG
+CFLAGS_cyttsp5_platform.o += -DVERBOSE_DEBUG
+endif
+
+
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_btn.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_btn.c
new file mode 100644
index 0000000..0150990
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_btn.c
@@ -0,0 +1,366 @@
+/*
+ * cyttsp5_btn.c
+ * Parade TrueTouch(TM) Standard Product V5 CapSense Reports Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include "cyttsp5_regs.h"
+
+#define CYTTSP5_BTN_NAME "cyttsp5_btn"
+
+static inline void cyttsp5_btn_key_action(struct cyttsp5_btn_data *bd,
+	int btn_no, int btn_state)
+{
+	struct device *dev = bd->dev;
+	struct cyttsp5_sysinfo *si = bd->si;
+
+	if (!si->btn[btn_no].enabled || si->btn[btn_no].state == btn_state)
+		return;
+
+	si->btn[btn_no].state = btn_state;
+	input_report_key(bd->input, si->btn[btn_no].key_code, btn_state);
+	input_sync(bd->input);
+
+	parade_debug(dev, DEBUG_LEVEL_1, "%s: btn=%d key_code=%d %s\n",
+		__func__, btn_no, si->btn[btn_no].key_code,
+		btn_state == CY_BTN_PRESSED ? "PRESSED" : "RELEASED");
+}
+
+static void cyttsp5_get_btn_touches(struct cyttsp5_btn_data *bd)
+{
+	struct cyttsp5_sysinfo *si = bd->si;
+	int num_btns = si->num_btns;
+	int cur_btn;
+	int cur_btn_state;
+
+	for (cur_btn = 0; cur_btn < num_btns; cur_btn++) {
+		/* Get current button state */
+		cur_btn_state = (si->xy_data[0] >> (cur_btn * CY_BITS_PER_BTN))
+				& CY_NUM_BTN_EVENT_ID;
+
+		cyttsp5_btn_key_action(bd, cur_btn, cur_btn_state);
+	}
+}
+
+static void cyttsp5_btn_lift_all(struct cyttsp5_btn_data *bd)
+{
+	struct cyttsp5_sysinfo *si = bd->si;
+	int i;
+
+	if (!si || si->num_btns == 0)
+		return;
+
+	for (i = 0; i < si->num_btns; i++)
+		cyttsp5_btn_key_action(bd, i, CY_BTN_RELEASED);
+}
+
+#ifdef VERBOSE_DEBUG
+static void cyttsp5_log_btn_data(struct cyttsp5_btn_data *bd)
+{
+	struct device *dev = bd->dev;
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	u8 *pr_buf = cd->pr_buf;
+	struct cyttsp5_sysinfo *si = bd->si;
+	int cur;
+	int value;
+
+	for (cur = 0; cur < si->num_btns; cur++) {
+		pr_buf[0] = 0;
+		if (si->xy_data[0] & (1 << cur))
+			value = 1;
+		else
+			value = 0;
+		snprintf(pr_buf, CY_MAX_PRBUF_SIZE, "btn_rec[%d]=0x", cur);
+		snprintf(pr_buf, CY_MAX_PRBUF_SIZE, "%s%X (%02X)",
+			pr_buf, value, le16_to_cpu(si->xy_data[1 + cur * 2]));
+
+		parade_debug(dev, DEBUG_LEVEL_2, "%s: %s\n", __func__, pr_buf);
+	}
+}
+#endif
+
+/* read xy_data for all current CapSense button touches */
+static int cyttsp5_xy_worker(struct cyttsp5_btn_data *bd)
+{
+	struct cyttsp5_sysinfo *si = bd->si;
+
+	/* extract button press/release touch information */
+	if (si->num_btns > 0) {
+		cyttsp5_get_btn_touches(bd);
+#ifdef VERBOSE_DEBUG
+		/* log button press/release touch information */
+		cyttsp5_log_btn_data(bd);
+#endif
+	}
+
+	return 0;
+}
+
+static int cyttsp5_btn_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_btn_data *bd = &cd->bd;
+	int rc;
+
+	if (bd->si->xy_mode[2] != bd->si->desc.btn_report_id)
+		return 0;
+
+	/* core handles handshake */
+	mutex_lock(&bd->btn_lock);
+	rc = cyttsp5_xy_worker(bd);
+	mutex_unlock(&bd->btn_lock);
+	if (rc < 0)
+		dev_err(dev, "%s: xy_worker error r=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static int cyttsp5_startup_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_btn_data *bd = &cd->bd;
+
+	mutex_lock(&bd->btn_lock);
+	cyttsp5_btn_lift_all(bd);
+	mutex_unlock(&bd->btn_lock);
+
+	return 0;
+}
+
+static int cyttsp5_btn_suspend_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_btn_data *bd = &cd->bd;
+
+	mutex_lock(&bd->btn_lock);
+	cyttsp5_btn_lift_all(bd);
+	bd->is_suspended = true;
+	mutex_unlock(&bd->btn_lock);
+
+	pm_runtime_put(dev);
+
+	return 0;
+}
+
+static int cyttsp5_btn_resume_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_btn_data *bd = &cd->bd;
+
+	pm_runtime_get(dev);
+
+	mutex_lock(&bd->btn_lock);
+	bd->is_suspended = false;
+	mutex_unlock(&bd->btn_lock);
+
+	return 0;
+}
+
+static int cyttsp5_btn_open(struct input_dev *input)
+{
+	struct device *dev = input->dev.parent;
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_btn_data *bd = &cd->bd;
+
+	pm_runtime_get_sync(dev);
+
+	mutex_lock(&bd->btn_lock);
+	bd->is_suspended = false;
+	mutex_unlock(&bd->btn_lock);
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: setup subscriptions\n", __func__);
+
+	/* set up touch call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_IRQ, CYTTSP5_BTN_NAME,
+		cyttsp5_btn_attention, CY_MODE_OPERATIONAL);
+
+	/* set up startup call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_STARTUP, CYTTSP5_BTN_NAME,
+		cyttsp5_startup_attention, 0);
+
+	/* set up suspend call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_SUSPEND, CYTTSP5_BTN_NAME,
+		cyttsp5_btn_suspend_attention, 0);
+
+	/* set up resume call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_RESUME, CYTTSP5_BTN_NAME,
+		cyttsp5_btn_resume_attention, 0);
+
+	return 0;
+}
+
+static void cyttsp5_btn_close(struct input_dev *input)
+{
+	struct device *dev = input->dev.parent;
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_btn_data *bd = &cd->bd;
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_IRQ, CYTTSP5_BTN_NAME,
+		cyttsp5_btn_attention, CY_MODE_OPERATIONAL);
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_STARTUP, CYTTSP5_BTN_NAME,
+		cyttsp5_startup_attention, 0);
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_SUSPEND, CYTTSP5_BTN_NAME,
+		cyttsp5_btn_suspend_attention, 0);
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_RESUME, CYTTSP5_BTN_NAME,
+		cyttsp5_btn_resume_attention, 0);
+
+	mutex_lock(&bd->btn_lock);
+	if (!bd->is_suspended) {
+		pm_runtime_put(dev);
+		bd->is_suspended = true;
+	}
+	mutex_unlock(&bd->btn_lock);
+}
+
+static int cyttsp5_setup_input_device(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_btn_data *bd = &cd->bd;
+	int i;
+	int rc;
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: Initialize event signals\n",
+		__func__);
+	__set_bit(EV_KEY, bd->input->evbit);
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: Number of buttons %d\n",
+		__func__, bd->si->num_btns);
+	for (i = 0; i < bd->si->num_btns; i++) {
+		parade_debug(dev, DEBUG_LEVEL_2, "%s: btn:%d keycode:%d\n",
+			__func__, i, bd->si->btn[i].key_code);
+		__set_bit(bd->si->btn[i].key_code, bd->input->keybit);
+	}
+
+	rc = input_register_device(bd->input);
+	if (rc < 0)
+		dev_err(dev, "%s: Error, failed register input device r=%d\n",
+			__func__, rc);
+	else
+		bd->input_device_registered = true;
+
+	return rc;
+}
+
+static int cyttsp5_setup_input_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_btn_data *bd = &cd->bd;
+	int rc;
+
+	bd->si = _cyttsp5_request_sysinfo(dev);
+	if (!bd->si)
+		return -EPERM;
+
+	rc = cyttsp5_setup_input_device(dev);
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_STARTUP, CYTTSP5_BTN_NAME,
+		cyttsp5_setup_input_attention, 0);
+
+	return rc;
+}
+
+int cyttsp5_btn_probe(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_btn_data *bd = &cd->bd;
+	struct cyttsp5_platform_data *pdata = dev_get_platdata(dev);
+	struct cyttsp5_btn_platform_data *btn_pdata;
+	int rc = 0;
+
+	if (!pdata || !pdata->btn_pdata) {
+		dev_err(dev, "%s: Missing platform data\n", __func__);
+		rc = -ENODEV;
+		goto error_no_pdata;
+	}
+	btn_pdata = pdata->btn_pdata;
+
+	mutex_init(&bd->btn_lock);
+	bd->dev = dev;
+	bd->pdata = btn_pdata;
+
+	/* Create the input device and register it. */
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Create the input device and register it\n", __func__);
+	bd->input = input_allocate_device();
+	if (!bd->input) {
+		dev_err(dev, "%s: Error, failed to allocate input device\n",
+			__func__);
+		rc = -ENODEV;
+		goto error_alloc_failed;
+	}
+
+	if (bd->pdata->inp_dev_name)
+		bd->input->name = bd->pdata->inp_dev_name;
+	else
+		bd->input->name = CYTTSP5_BTN_NAME;
+	scnprintf(bd->phys, sizeof(bd->phys), "%s/input%d", dev_name(dev),
+			cd->phys_num++);
+	bd->input->phys = bd->phys;
+	bd->input->dev.parent = bd->dev;
+	bd->input->open = cyttsp5_btn_open;
+	bd->input->close = cyttsp5_btn_close;
+	input_set_drvdata(bd->input, bd);
+
+	/* get sysinfo */
+	bd->si = _cyttsp5_request_sysinfo(dev);
+
+	if (bd->si) {
+		rc = cyttsp5_setup_input_device(dev);
+		if (rc)
+			goto error_init_input;
+	} else {
+		dev_err(dev, "%s: Fail get sysinfo pointer from core p=%pK\n",
+			__func__, bd->si);
+		_cyttsp5_subscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_BTN_NAME, cyttsp5_setup_input_attention, 0);
+	}
+
+	return 0;
+
+error_init_input:
+	input_free_device(bd->input);
+error_alloc_failed:
+error_no_pdata:
+	dev_err(dev, "%s failed.\n", __func__);
+	return rc;
+}
+
+int cyttsp5_btn_release(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_btn_data *bd = &cd->bd;
+
+	if (bd->input_device_registered) {
+		input_unregister_device(bd->input);
+	} else {
+		input_free_device(bd->input);
+		_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_BTN_NAME, cyttsp5_setup_input_attention, 0);
+	}
+
+	return 0;
+}
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_core.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_core.c
new file mode 100644
index 0000000..a8f1df3
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_core.c
@@ -0,0 +1,6455 @@
+/*
+ * cyttsp5_core.c
+ * Parade TrueTouch(TM) Standard Product V5 Core Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+#include <linux/input/mt.h>
+#include <linux/regulator/machine.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/power_supply.h>
+#include "cyttsp5_regs.h"
+
+#define CY_GES_WAKEUP
+#define CY_CORE_STARTUP_RETRY_COUNT 3
+
+#define PINCTRL_STATE_ACTIVE    "pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND   "pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE   "pmx_ts_release"
+
+#define FT_VTG_MIN_UV           2600000
+#define FT_VTG_MAX_UV           3300000
+#define FT_I2C_VTG_MIN_UV       1800000
+#define FT_I2C_VTG_MAX_UV       1800000
+
+#define USE_FB_SUSPEND_RESUME
+#define PATCH_WAKEUP_NACK_ISSUE
+
+MODULE_FIRMWARE(CY_FW_FILE_NAME);
+
+static const char *cy_driver_core_name = CYTTSP5_CORE_NAME;
+static const char *cy_driver_core_version = CY_DRIVER_VERSION;
+static const char *cy_driver_core_date = CY_DRIVER_DATE;
+static bool cyttsp5_first_probe = true;
+static bool is_cyttsp5_probe_success;
+static const struct cyttsp5_bus_ops *cyttsp5_bus_ops_save;
+
+struct cyttsp5_hid_field {
+	int report_count;
+	int report_size;
+	int size; /* report_count * report_size */
+	int offset;
+	int data_type;
+	int logical_min;
+	int logical_max;
+	/* Usage Page (Hi 16 bit) + Usage (Lo 16 bit) */
+	u32 usage_page;
+	u32 collection_usage_pages[CY_HID_MAX_COLLECTIONS];
+	struct cyttsp5_hid_report *report;
+	bool record_field;
+};
+
+struct cyttsp5_hid_report {
+	u8 id;
+	u8 type;
+	int size;
+	struct cyttsp5_hid_field *fields[CY_HID_MAX_FIELDS];
+	int num_fields;
+	int record_field_index;
+	int header_size;
+	int record_size;
+	u32 usage_page;
+};
+
+struct atten_node {
+	struct list_head node;
+	char *id;
+	struct device *dev;
+
+	int (*func)(struct device *);
+	int mode;
+};
+
+struct param_node {
+	struct list_head node;
+	u8 id;
+	u32 value;
+	u8 size;
+};
+
+struct module_node {
+	struct list_head node;
+	struct cyttsp5_module *module;
+	void *data;
+};
+
+struct cyttsp5_hid_cmd {
+	u8 opcode;
+	u8 report_type;
+	union {
+		u8 report_id;
+		u8 power_state;
+	};
+	u8 has_data_register;
+	size_t write_length;
+	u8 *write_buf;
+	u8 *read_buf;
+	u8 wait_interrupt;
+	u8 reset_cmd;
+	u16 timeout_ms;
+};
+
+struct cyttsp5_hid_output {
+	u8 cmd_type;
+	u16 length;
+	u8 command_code;
+	size_t write_length;
+	u8 *write_buf;
+	u8 novalidate;
+	u8 reset_expected;
+	u16 timeout_ms;
+};
+
+#define SET_CMD_OPCODE(byte, opcode) SET_CMD_LOW(byte, opcode)
+#define SET_CMD_REPORT_TYPE(byte, type) SET_CMD_HIGH(byte, ((type) << 4))
+#define SET_CMD_REPORT_ID(byte, id) SET_CMD_LOW(byte, id)
+
+#define HID_OUTPUT_APP_COMMAND(command) \
+	.cmd_type = HID_OUTPUT_CMD_APP, \
+	.command_code = command
+
+#define HID_OUTPUT_BL_COMMAND(command) \
+	.cmd_type = HID_OUTPUT_CMD_BL, \
+	.command_code = command
+
+#ifdef VERBOSE_DEBUG
+void cyttsp5_pr_buf(struct device *dev, u8 *dptr, int size,
+		const char *data_name)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	u8 *pr_buf = cd->pr_buf;
+	int i, k;
+	const char fmt[] = "%02X ";
+	int max;
+
+	if (!size)
+		return;
+
+	max = (CY_MAX_PRBUF_SIZE - 1) - sizeof(CY_PR_TRUNCATED);
+
+	pr_buf[0] = 0;
+	for (i = k = 0; i < size && k < max; i++, k += 3)
+		scnprintf(pr_buf + k, CY_MAX_PRBUF_SIZE - k, fmt, dptr[i]);
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s:  %s[0..%d]=%s%s\n",
+		__func__, data_name,
+		size - 1, pr_buf, size <= max ? "" : CY_PR_TRUNCATED);
+}
+EXPORT_SYMBOL_GPL(cyttsp5_pr_buf);
+#endif
+
+#ifdef TTHE_TUNER_SUPPORT
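+/*
+ * Append "<name> XX XX ... \n" (hex bytes) to the TTHE tuner capture
+ * buffer, optionally prefixed with a jiffies timestamp, and wake any
+ * reader blocked on wait_q. Output is silently dropped once the
+ * buffer is full.
+ */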
+static int tthe_print(struct cyttsp5_core_data *cd, u8 *buf, int buf_len,
+		const u8 *data_name)
+{
+	int len = strlen(data_name);
+	int i, n;
+	u8 *p;
+	int remain;
+	u8 data_name_with_time_stamp[100];
+
+	if (cd->show_timestamp) {
+		snprintf(data_name_with_time_stamp,
+			ARRAY_SIZE(data_name_with_time_stamp), "[%u] %s",
+			jiffies_to_msecs(jiffies), data_name);
+		data_name = data_name_with_time_stamp;
+		len = strlen(data_name);
+	}
+
+	mutex_lock(&cd->tthe_lock);
+	if (!cd->tthe_buf)
+		goto exit;
+
+	if (cd->tthe_buf_len + (len + buf_len) > CY_MAX_PRBUF_SIZE)
+		goto exit;
+
+	if (len + buf_len == 0)
+		goto exit;
+
+	remain = CY_MAX_PRBUF_SIZE - cd->tthe_buf_len;
+	if (remain < len)
+		len = remain;
+
+	p = cd->tthe_buf + cd->tthe_buf_len;
+	memcpy(p, data_name, len);
+	cd->tthe_buf_len += len;
+	p += len;
+	remain -= len;
+
+	*p = 0;
+	for (i = 0; i < buf_len; i++) {
+		n = scnprintf(p, remain, "%02X ", buf[i]);
+		if (!n)
+			break;
+		p += n;
+		remain -= n;
+		cd->tthe_buf_len += n;
+	}
+
+	n = scnprintf(p, remain, "\n");
+	if (!n)
+		cd->tthe_buf[cd->tthe_buf_len] = 0;
+	cd->tthe_buf_len += n;
+	wake_up(&cd->wait_q);
+exit:
+	mutex_unlock(&cd->tthe_lock);
+	return 0;
+}
+
+static int _cyttsp5_request_tthe_print(struct device *dev, u8 *buf,
+		int buf_len, const u8 *data_name)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	return tthe_print(cd, buf, buf_len, data_name);
+}
+#endif
+
+/*
+ * cyttsp5_platform_detect_read()
+ *
+ * Read callback passed to the platform detect function to perform a
+ * read operation on the adapter
+ */
+static int cyttsp5_platform_detect_read(struct device *dev, void *buf, int size)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	return cyttsp5_adap_read_default(cd, buf, size);
+}
+
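+/*
+ * Look up a parsed HID report by (type, id) in the report table;
+ * when @create is set and the table has room, allocate and register
+ * a new entry instead of returning NULL.
+ */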
+/* Must be called with cd->hid_report_lock acquired */
+static struct cyttsp5_hid_report *cyttsp5_get_hid_report_(
+		struct cyttsp5_core_data *cd, u8 report_type, u8 report_id,
+		bool create)
+{
+	struct cyttsp5_hid_report *report = NULL;
+	int i;
+
+	/* Look for created reports */
+	for (i = 0; i < cd->num_hid_reports; i++)
+		if (cd->hid_reports[i]->type == report_type
+				&& cd->hid_reports[i]->id == report_id)
+			return cd->hid_reports[i];
+
+	/* Create a new report */
+	if (create && cd->num_hid_reports < CY_HID_MAX_REPORTS) {
+		report = kzalloc(sizeof(struct cyttsp5_hid_report),
+				GFP_KERNEL);
+		if (!report)
+			return NULL;
+
+		report->type = report_type;
+		report->id = report_id;
+		cd->hid_reports[cd->num_hid_reports++] = report;
+	}
+
+	return report;
+}
+
+/* Must be called with cd->hid_report_lock acquired */
+static void cyttsp5_free_hid_reports_(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_hid_report *report;
+	int i, j;
+
+	for (i = 0; i < cd->num_hid_reports; i++) {
+		report = cd->hid_reports[i];
+		for (j = 0; j < report->num_fields; j++)
+			kfree(report->fields[j]);
+		kfree(report);
+		cd->hid_reports[i] = NULL;
+	}
+
+	cd->num_hid_reports = 0;
+}
+
+static void cyttsp5_free_hid_reports(struct cyttsp5_core_data *cd)
+{
+	mutex_lock(&cd->hid_report_lock);
+	cyttsp5_free_hid_reports_(cd);
+	mutex_unlock(&cd->hid_report_lock);
+}
+
+/* Must be called with cd->hid_report_lock acquired */
+static struct cyttsp5_hid_field *cyttsp5_create_hid_field_(
+		struct cyttsp5_hid_report *report)
+{
+	struct cyttsp5_hid_field *field;
+
+	if (!report)
+		return NULL;
+
+	if (report->num_fields == CY_HID_MAX_FIELDS)
+		return NULL;
+
+	field = kzalloc(sizeof(struct cyttsp5_hid_field), GFP_KERNEL);
+	if (!field)
+		return NULL;
+
+	field->report = report;
+
+	report->fields[report->num_fields++] = field;
+
+	return field;
+}
+
+static int cyttsp5_add_parameter(struct cyttsp5_core_data *cd,
+		u8 param_id, u32 param_value, u8 param_size)
+{
+	struct param_node *param, *param_new;
+
+	/* Check if parameter exists */
+	spin_lock(&cd->spinlock);
+	list_for_each_entry(param, &cd->param_list, node) {
+		if (param->id == param_id) {
+			/* Update parameter */
+			param->value = param_value;
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				"%s: Update parameter id:%d value:%d size:%d\n",
+				 __func__, param_id, param_value, param_size);
+			goto exit_unlock;
+		}
+	}
+	spin_unlock(&cd->spinlock);
+
+	param_new = kzalloc(sizeof(*param_new), GFP_KERNEL);
+	if (!param_new)
+		return -ENOMEM;
+
+	param_new->id = param_id;
+	param_new->value = param_value;
+	param_new->size = param_size;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_2,
+		"%s: Add parameter id:%d value:%d size:%d\n",
+		__func__, param_id, param_value, param_size);
+
+	spin_lock(&cd->spinlock);
+	list_add(&param_new->node, &cd->param_list);
+exit_unlock:
+	spin_unlock(&cd->spinlock);
+
+	return 0;
+}
+
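+/*
+ * Grant the caller exclusive access to the device. A timeout_ms of
+ * zero means wait indefinitely. Access is granted immediately only
+ * when there is no owner and no earlier waiter, so sleepers on
+ * wait_q are not starved by newcomers.
+ */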
+int request_exclusive(struct cyttsp5_core_data *cd, void *ownptr,
+		int timeout_ms)
+{
+	int t = msecs_to_jiffies(timeout_ms);
+	bool with_timeout = (timeout_ms != 0);
+
+	mutex_lock(&cd->system_lock);
+	if (!cd->exclusive_dev && cd->exclusive_waits == 0) {
+		cd->exclusive_dev = ownptr;
+		goto exit;
+	}
+
+	cd->exclusive_waits++;
+wait:
+	mutex_unlock(&cd->system_lock);
+	if (with_timeout) {
+		t = wait_event_timeout(cd->wait_q, !cd->exclusive_dev, t);
+		if (IS_TMO(t)) {
+			dev_err(cd->dev, "%s: tmo waiting exclusive access\n",
+				__func__);
+			return -ETIME;
+		}
+	} else {
+		wait_event(cd->wait_q, !cd->exclusive_dev);
+	}
+
+	mutex_lock(&cd->system_lock);
+	if (cd->exclusive_dev)
+		goto wait;
+	cd->exclusive_dev = ownptr;
+	cd->exclusive_waits--;
+exit:
+	mutex_unlock(&cd->system_lock);
+	parade_debug(cd->dev, DEBUG_LEVEL_2, "%s: request_exclusive ok=%pK\n",
+		__func__, ownptr);
+
+	return 0;
+}
+
+static int release_exclusive_(struct cyttsp5_core_data *cd, void *ownptr)
+{
+	if (cd->exclusive_dev != ownptr)
+		return -EINVAL;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_2, "%s: exclusive_dev %pK freed\n",
+		__func__, cd->exclusive_dev);
+	cd->exclusive_dev = NULL;
+	wake_up(&cd->wait_q);
+	return 0;
+}
+
+/*
+ * Returns an error if exclusive access was not owned by @ownptr
+ */
+int release_exclusive(struct cyttsp5_core_data *cd, void *ownptr)
+{
+	int rc;
+
+	mutex_lock(&cd->system_lock);
+	rc = release_exclusive_(cd, ownptr);
+	mutex_unlock(&cd->system_lock);
+
+	return rc;
+}
+
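+/*
+ * Build and send a raw HID command: the command register address, a
+ * two-byte command word (report type and ID, then the opcode), an
+ * extended report ID byte when the ID does not fit in the low nibble
+ * (>= 0xF), an optional data register address, and any payload from
+ * hid_cmd->write_buf.
+ */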
+static int cyttsp5_hid_exec_cmd_(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_cmd *hid_cmd)
+{
+	int rc;
+	u8 *cmd;
+	u8 cmd_length;
+	u8 cmd_offset = 0;
+
+	cmd_length = 2 /* command register */
+		+ 2    /* command */
+		+ (hid_cmd->report_id >= 0xF ? 1 : 0)   /* Report ID */
+		+ (hid_cmd->has_data_register ? 2 : 0)  /* Data register */
+		+ hid_cmd->write_length;                /* Data length */
+
+	cmd = kzalloc(cmd_length, GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	/* Set Command register */
+	memcpy(&cmd[cmd_offset], &cd->hid_desc.command_register,
+			sizeof(cd->hid_desc.command_register));
+	cmd_offset += sizeof(cd->hid_desc.command_register);
+
+	/* Set Command */
+	SET_CMD_REPORT_TYPE(cmd[cmd_offset], hid_cmd->report_type);
+
+	if (hid_cmd->report_id >= 0xF)
+		SET_CMD_REPORT_ID(cmd[cmd_offset], 0xF);
+	else
+		SET_CMD_REPORT_ID(cmd[cmd_offset], hid_cmd->report_id);
+	cmd_offset++;
+
+	SET_CMD_OPCODE(cmd[cmd_offset], hid_cmd->opcode);
+	cmd_offset++;
+
+	if (hid_cmd->report_id >= 0xF) {
+		cmd[cmd_offset] = hid_cmd->report_id;
+		cmd_offset++;
+	}
+
+	/* Set Data register */
+	if (hid_cmd->has_data_register) {
+		memcpy(&cmd[cmd_offset], &cd->hid_desc.data_register,
+				sizeof(cd->hid_desc.data_register));
+		cmd_offset += sizeof(cd->hid_desc.data_register);
+	}
+
+	/* Set Data */
+	if (hid_cmd->write_length && hid_cmd->write_buf) {
+		memcpy(&cmd[cmd_offset], hid_cmd->write_buf,
+				hid_cmd->write_length);
+		cmd_offset += hid_cmd->write_length;
+	}
+
+	rc = cyttsp5_adap_write_read_specific(cd, cmd_length, cmd,
+			hid_cmd->read_buf);
+	if (rc)
+		dev_err(cd->dev, "%s: Fail cyttsp5_adap_transfer\n", __func__);
+
+	kfree(cmd);
+	return rc;
+}
+
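+/*
+ * When wait_interrupt is set, arm the command state flag before
+ * issuing the command, then sleep until it is cleared (expected from
+ * the interrupt path) or the timeout expires; on failure the flag is
+ * disarmed so a late completion is not mistaken for a response.
+ */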
+static int cyttsp5_hid_exec_cmd_and_wait_(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_cmd *hid_cmd)
+{
+	int rc;
+	int t;
+	u16 timeout_ms;
+	int *cmd_state;
+
+	if (hid_cmd->reset_cmd)
+		cmd_state = &cd->hid_reset_cmd_state;
+	else
+		cmd_state = &cd->hid_cmd_state;
+
+	if (hid_cmd->wait_interrupt) {
+		mutex_lock(&cd->system_lock);
+		*cmd_state = 1;
+		mutex_unlock(&cd->system_lock);
+	}
+
+	rc = cyttsp5_hid_exec_cmd_(cd, hid_cmd);
+	if (rc) {
+		if (hid_cmd->wait_interrupt)
+			goto error;
+
+		goto exit;
+	}
+
+	if (!hid_cmd->wait_interrupt)
+		goto exit;
+
+	if (hid_cmd->timeout_ms)
+		timeout_ms = hid_cmd->timeout_ms;
+	else
+		timeout_ms = CY_HID_RESET_TIMEOUT;
+
+	t = wait_event_timeout(cd->wait_q, (*cmd_state == 0),
+			msecs_to_jiffies(timeout_ms));
+	if (IS_TMO(t)) {
+		dev_err(cd->dev, "%s: HID output cmd execution timed out\n",
+			__func__);
+		rc = -ETIME;
+		goto error;
+	}
+
+	goto exit;
+
+error:
+	mutex_lock(&cd->system_lock);
+	*cmd_state = 0;
+	mutex_unlock(&cd->system_lock);
+
+exit:
+	return rc;
+}
+
+static int cyttsp5_hid_cmd_reset_(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_hid_cmd hid_cmd = {
+		.opcode = HID_CMD_RESET,
+		.wait_interrupt = 1,
+		.reset_cmd = 1,
+		.timeout_ms = CY_HID_RESET_TIMEOUT,
+	};
+
+	return cyttsp5_hid_exec_cmd_and_wait_(cd, &hid_cmd);
+}
+
+static int cyttsp5_hid_cmd_reset(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_cmd_reset_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int cyttsp5_hid_cmd_set_power_(struct cyttsp5_core_data *cd,
+		u8 power_state)
+{
+	int rc;
+	struct cyttsp5_hid_cmd hid_cmd = {
+		.opcode = HID_CMD_SET_POWER,
+		.wait_interrupt = 1,
+		.timeout_ms = CY_HID_SET_POWER_TIMEOUT,
+	};
+	hid_cmd.power_state = power_state;
+
+	rc = cyttsp5_hid_exec_cmd_and_wait_(cd, &hid_cmd);
+	if (rc) {
+		dev_err(cd->dev, "%s: Failed to set power to state:%d\n",
+			__func__, power_state);
+		return rc;
+	}
+
+	/* Validate the echoed power state and opcode in the response */
+	if ((cd->response_buf[2] != HID_RESPONSE_REPORT_ID)
+		|| ((cd->response_buf[3] & 0x3) != power_state)
+		|| ((cd->response_buf[4] & 0xF) != HID_CMD_SET_POWER))
+		rc = -EINVAL;
+
+	return rc;
+}
+
+static int cyttsp5_hid_cmd_set_power(struct cyttsp5_core_data *cd,
+		u8 power_state)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_cmd_set_power_(cd, power_state);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static const u16 crc_table[16] = {
+	0x0000, 0x1021, 0x2042, 0x3063,
+	0x4084, 0x50a5, 0x60c6, 0x70e7,
+	0x8108, 0x9129, 0xa14a, 0xb16b,
+	0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+};
+
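+/*
+ * Nibble-at-a-time CRC-16 using the CCITT polynomial 0x1021 with an
+ * initial remainder of 0xFFFF; each byte is folded into the
+ * remainder in two 4-bit steps via crc_table[] above.
+ */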
+static u16 _cyttsp5_compute_crc(u8 *buf, u32 size)
+{
+	u16 remainder = 0xFFFF;
+	u16 xor_mask = 0x0000;
+	u32 index;
+	u32 byte_value;
+	u32 table_index;
+	u32 crc_bit_width = sizeof(u16) * 8;
+
+	/* Divide the message by polynomial, via the table. */
+	for (index = 0; index < size; index++) {
+		byte_value = buf[index];
+		table_index = ((byte_value >> 4) & 0x0F)
+			^ (remainder >> (crc_bit_width - 4));
+		remainder = crc_table[table_index] ^ (remainder << 4);
+		table_index = (byte_value & 0x0F)
+			^ (remainder >> (crc_bit_width - 4));
+		remainder = crc_table[table_index] ^ (remainder << 4);
+	}
+
+	/* Apply the final XOR mask (zero for this CRC variant). */
+	return remainder ^ xor_mask;
+}
+
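+/*
+ * Validate a bootloader response as laid out below: 16-bit length at
+ * [0], SOP at [4], status at [5], payload, a little-endian CRC over
+ * SOP..payload at [size-3..size-2], and EOP at [size-1]. An empty
+ * response is accepted when the command is expected to reset the
+ * part.
+ */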
+static int cyttsp5_hid_output_validate_bl_response(
+		struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_output *hid_output)
+{
+	u16 size;
+	u16 crc;
+	u8 status;
+
+	size = get_unaligned_le16(&cd->response_buf[0]);
+
+	if (hid_output->reset_expected && !size)
+		return 0;
+
+	if (cd->response_buf[HID_OUTPUT_RESPONSE_REPORT_OFFSET]
+			!= HID_BL_RESPONSE_REPORT_ID) {
+		dev_err(cd->dev, "%s: HID output response, wrong report_id\n",
+			__func__);
+		return -EPROTO;
+	}
+
+	if (cd->response_buf[4] != HID_OUTPUT_BL_SOP) {
+		dev_err(cd->dev, "%s: HID output response, wrong SOP\n",
+			__func__);
+		return -EPROTO;
+	}
+
+	if (cd->response_buf[size - 1] != HID_OUTPUT_BL_EOP) {
+		dev_err(cd->dev, "%s: HID output response, wrong EOP\n",
+			__func__);
+		return -EPROTO;
+	}
+
+	crc = _cyttsp5_compute_crc(&cd->response_buf[4], size - 7);
+	if (cd->response_buf[size - 3] != LOW_BYTE(crc)
+			|| cd->response_buf[size - 2] != HI_BYTE(crc)) {
+		dev_err(cd->dev, "%s: HID output response, wrong CRC 0x%X\n",
+			__func__, crc);
+		return -EPROTO;
+	}
+
+	status = cd->response_buf[5];
+	if (status) {
+		dev_err(cd->dev, "%s: HID output response, ERROR:%d\n",
+			__func__, status);
+		return -EPROTO;
+	}
+
+	return 0;
+}
+
+static int cyttsp5_hid_output_validate_app_response(
+		struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_output *hid_output)
+{
+	int command_code;
+	u16 size;
+
+	size = get_unaligned_le16(&cd->response_buf[0]);
+
+	if (hid_output->reset_expected && !size)
+		return 0;
+
+	if (cd->response_buf[HID_OUTPUT_RESPONSE_REPORT_OFFSET]
+			!= HID_APP_RESPONSE_REPORT_ID) {
+		dev_err(cd->dev, "%s: HID output response, wrong report_id\n",
+			__func__);
+		return -EPROTO;
+	}
+
+	command_code = cd->response_buf[HID_OUTPUT_RESPONSE_CMD_OFFSET]
+		& HID_OUTPUT_RESPONSE_CMD_MASK;
+	if (command_code != hid_output->command_code) {
+		dev_err(cd->dev,
+			"%s: HID output response, wrong command_code:%X\n",
+			__func__, command_code);
+		return -EPROTO;
+	}
+
+	return 0;
+}
+
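+/*
+ * Sniff outgoing Set Parameter commands, both raw user buffers and
+ * structured app commands, and cache the id/value/size once the
+ * response confirms success so the parameter can be replayed later
+ * (gated by CY_CORE_FLAG_RESTORE_PARAMETERS).
+ */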
+static void cyttsp5_check_set_parameter(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_output *hid_output, bool raw)
+{
+	u8 *param_buf;
+	u32 param_value = 0;
+	u8 param_size;
+	u8 param_id;
+	int i = 0;
+
+	if (!(cd->cpdata->flags & CY_CORE_FLAG_RESTORE_PARAMETERS))
+		return;
+
+	/* Check command input for Set Parameter command */
+	if (raw && hid_output->length >= 10 && hid_output->length <= 13
+			&& !memcmp(&hid_output->write_buf[0],
+					&cd->hid_desc.output_register,
+					sizeof(cd->hid_desc.output_register))
+			&& hid_output->write_buf[4] ==
+					HID_APP_OUTPUT_REPORT_ID
+			&& hid_output->write_buf[6] ==
+					HID_OUTPUT_SET_PARAM)
+		param_buf = &hid_output->write_buf[7];
+	else if (!raw && hid_output->cmd_type == HID_OUTPUT_CMD_APP
+			&& hid_output->command_code == HID_OUTPUT_SET_PARAM
+			&& hid_output->write_length >= 3
+			&& hid_output->write_length <= 6)
+		param_buf = &hid_output->write_buf[0];
+	else
+		return;
+
+	/* Get parameter ID, size and value */
+	param_id = param_buf[0];
+	param_size = param_buf[1];
+	if (param_size > 4) {
+		dev_err(cd->dev, "%s: Invalid parameter size\n", __func__);
+		return;
+	}
+
+	param_buf = &param_buf[2];
+	while (i < param_size)
+		param_value += *(param_buf++) << (8 * i++);
+
+	/* Check command response for Set Parameter command */
+	if (cd->response_buf[2] != HID_APP_RESPONSE_REPORT_ID
+			|| (cd->response_buf[4] & HID_OUTPUT_CMD_MASK) !=
+				HID_OUTPUT_SET_PARAM
+			|| cd->response_buf[5] != param_id
+			|| cd->response_buf[6] != param_size) {
+		dev_err(cd->dev, "%s: Set Parameter command not successful\n",
+			__func__);
+		return;
+	}
+
+	cyttsp5_add_parameter(cd, param_id, param_value, param_size);
+}
+
+static void cyttsp5_check_command(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_output *hid_output, bool raw)
+{
+	cyttsp5_check_set_parameter(cd, hid_output, raw);
+}
+
+static int cyttsp5_hid_output_validate_response(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_output *hid_output)
+{
+	if (hid_output->cmd_type == HID_OUTPUT_CMD_BL)
+		return cyttsp5_hid_output_validate_bl_response(cd, hid_output);
+
+	return cyttsp5_hid_output_validate_app_response(cd, hid_output);
+}
+
+static int cyttsp5_hid_send_output_user_(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_output *hid_output)
+{
+	int rc;
+
+	if (!hid_output->length || !hid_output->write_buf)
+		return -EINVAL;
+
+	rc = cyttsp5_adap_write_read_specific(cd, hid_output->length,
+			hid_output->write_buf, NULL);
+	if (rc)
+		dev_err(cd->dev, "%s: Fail cyttsp5_adap_transfer\n", __func__);
+
+	return rc;
+}
+
+static int cyttsp5_hid_send_output_user_and_wait_(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_output *hid_output)
+{
+	int rc;
+	int t;
+
+	mutex_lock(&cd->system_lock);
+	cd->hid_cmd_state = HID_OUTPUT_USER_CMD + 1;
+	mutex_unlock(&cd->system_lock);
+
+	rc = cyttsp5_hid_send_output_user_(cd, hid_output);
+	if (rc)
+		goto error;
+
+	t = wait_event_timeout(cd->wait_q, (cd->hid_cmd_state == 0),
+			msecs_to_jiffies(CY_HID_OUTPUT_USER_TIMEOUT));
+	if (IS_TMO(t)) {
+		dev_err(cd->dev, "%s: HID output cmd execution timed out\n",
+			__func__);
+		rc = -ETIME;
+		goto error;
+	}
+
+	cyttsp5_check_command(cd, hid_output, true);
+
+	goto exit;
+
+error:
+	mutex_lock(&cd->system_lock);
+	cd->hid_cmd_state = 0;
+	mutex_unlock(&cd->system_lock);
+
+exit:
+	return rc;
+}
+
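+/*
+ * Frame an output report: output register address, 16-bit length,
+ * report ID and a reserved byte, then the command. Bootloader
+ * commands are additionally wrapped as SOP, command code, 16-bit
+ * data length, payload, CRC (over SOP through payload) and EOP.
+ */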
+static int cyttsp5_hid_send_output_(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_output *hid_output)
+{
+	int rc;
+	u8 *cmd;
+	u16 length;
+	u8 report_id;
+	u8 cmd_offset = 0;
+	u16 crc;
+	u8 cmd_allocated = 0;
+
+	switch (hid_output->cmd_type) {
+	case HID_OUTPUT_CMD_APP:
+		report_id = HID_APP_OUTPUT_REPORT_ID;
+		length = 5;
+		break;
+	case HID_OUTPUT_CMD_BL:
+		report_id = HID_BL_OUTPUT_REPORT_ID;
+		length = 11 /* 5 + SOP + LEN(2) + CRC(2) + EOP */;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	length += hid_output->write_length;
+
+	if (length + 2 > CYTTSP5_PREALLOCATED_CMD_BUFFER) {
+		cmd = kzalloc(length + 2, GFP_KERNEL);
+		if (!cmd)
+			return -ENOMEM;
+		cmd_allocated = 1;
+	} else {
+		cmd = cd->cmd_buf;
+	}
+
+	/* Set Output register */
+	memcpy(&cmd[cmd_offset], &cd->hid_desc.output_register,
+			sizeof(cd->hid_desc.output_register));
+	cmd_offset += sizeof(cd->hid_desc.output_register);
+
+	cmd[cmd_offset++] = LOW_BYTE(length);
+	cmd[cmd_offset++] = HI_BYTE(length);
+	cmd[cmd_offset++] = report_id;
+	cmd[cmd_offset++] = 0x0; /* reserved */
+	if (hid_output->cmd_type == HID_OUTPUT_CMD_BL)
+		cmd[cmd_offset++] = HID_OUTPUT_BL_SOP;
+	cmd[cmd_offset++] = hid_output->command_code;
+
+	/* Set Data Length for bootloader */
+	if (hid_output->cmd_type == HID_OUTPUT_CMD_BL) {
+		cmd[cmd_offset++] = LOW_BYTE(hid_output->write_length);
+		cmd[cmd_offset++] = HI_BYTE(hid_output->write_length);
+	}
+	/* Set Data */
+	if (hid_output->write_length && hid_output->write_buf) {
+		memcpy(&cmd[cmd_offset], hid_output->write_buf,
+				hid_output->write_length);
+		cmd_offset += hid_output->write_length;
+	}
+	if (hid_output->cmd_type == HID_OUTPUT_CMD_BL) {
+		crc = _cyttsp5_compute_crc(&cmd[6],
+				hid_output->write_length + 4);
+		cmd[cmd_offset++] = LOW_BYTE(crc);
+		cmd[cmd_offset++] = HI_BYTE(crc);
+		cmd[cmd_offset++] = HID_OUTPUT_BL_EOP;
+	}
+
+	cyttsp5_pr_buf(cd->dev, cmd, length + 2, "command");
+	rc = cyttsp5_adap_write_read_specific(cd, length + 2, cmd, NULL);
+	if (rc)
+		dev_err(cd->dev, "%s: Fail cyttsp5_adap_transfer\n", __func__);
+
+	if (cmd_allocated)
+		kfree(cmd);
+	return rc;
+}
+
+static int cyttsp5_hid_send_output_and_wait_(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_output *hid_output)
+{
+	int rc;
+	int t;
+#ifdef VERBOSE_DEBUG
+	u16 size;
+#endif
+	u16 timeout_ms;
+
+	mutex_lock(&cd->system_lock);
+	cd->hid_cmd_state = hid_output->command_code + 1;
+	mutex_unlock(&cd->system_lock);
+
+	if (hid_output->timeout_ms)
+		timeout_ms = hid_output->timeout_ms;
+	else
+		timeout_ms = CY_HID_OUTPUT_TIMEOUT;
+
+	rc = cyttsp5_hid_send_output_(cd, hid_output);
+	if (rc)
+		goto error;
+
+	t = wait_event_timeout(cd->wait_q, (cd->hid_cmd_state == 0),
+			msecs_to_jiffies(timeout_ms));
+	if (IS_TMO(t)) {
+		dev_err(cd->dev, "%s: HID output cmd execution timed out\n",
+			__func__);
+		rc = -ETIME;
+		goto error;
+	}
+
+	if (!hid_output->novalidate)
+		rc = cyttsp5_hid_output_validate_response(cd, hid_output);
+
+	cyttsp5_check_command(cd, hid_output, false);
+
+#ifdef VERBOSE_DEBUG
+	size = get_unaligned_le16(&cd->response_buf[0]);
+	cyttsp5_pr_buf(cd->dev, cd->response_buf, size, "return_buf");
+#endif
+
+	goto exit;
+
+error:
+	mutex_lock(&cd->system_lock);
+	cd->hid_cmd_state = 0;
+	mutex_unlock(&cd->system_lock);
+exit:
+	return rc;
+}
+
+static int cyttsp5_hid_output_null_(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_NULL),
+	};
+
+	return cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+}
+
+static int cyttsp5_hid_output_null(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_null_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int cyttsp5_hid_output_start_bootloader_(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_START_BOOTLOADER),
+		.timeout_ms = CY_HID_OUTPUT_START_BOOTLOADER_TIMEOUT,
+		.reset_expected = 1,
+	};
+
+	return cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+}
+
+static int cyttsp5_hid_output_start_bootloader(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_start_bootloader_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_start_bl(struct device *dev, int protect)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_start_bootloader(cd);
+
+	return cyttsp5_hid_output_start_bootloader_(cd);
+}
+
+static void cyttsp5_si_get_cydata(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_cydata *cydata = &cd->sysinfo.cydata;
+	struct cyttsp5_cydata_dev *cydata_dev =
+		(struct cyttsp5_cydata_dev *)
+		&cd->response_buf[HID_SYSINFO_CYDATA_OFFSET];
+
+	cydata->pip_ver_major = cydata_dev->pip_ver_major;
+	cydata->pip_ver_minor = cydata_dev->pip_ver_minor;
+	cydata->bl_ver_major = cydata_dev->bl_ver_major;
+	cydata->bl_ver_minor = cydata_dev->bl_ver_minor;
+	cydata->fw_ver_major = cydata_dev->fw_ver_major;
+	cydata->fw_ver_minor = cydata_dev->fw_ver_minor;
+
+	cydata->fw_pid = get_unaligned_le16(&cydata_dev->fw_pid);
+	cydata->fw_ver_conf = get_unaligned_le16(&cydata_dev->fw_ver_conf);
+	cydata->post_code = get_unaligned_le16(&cydata_dev->post_code);
+	cydata->revctrl = get_unaligned_le32(&cydata_dev->revctrl);
+	cydata->jtag_id_l = get_unaligned_le16(&cydata_dev->jtag_si_id_l);
+	cydata->jtag_id_h = get_unaligned_le16(&cydata_dev->jtag_si_id_h);
+
+	memcpy(cydata->mfg_id, cydata_dev->mfg_id, CY_NUM_MFGID);
+
+	cyttsp5_pr_buf(cd->dev, (u8 *)cydata_dev,
+		sizeof(struct cyttsp5_cydata_dev), "sysinfo_cydata");
+}
+
+static void cyttsp5_si_get_sensing_conf_data(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_sensing_conf_data *scd = &cd->sysinfo.sensing_conf_data;
+	struct cyttsp5_sensing_conf_data_dev *scd_dev =
+		(struct cyttsp5_sensing_conf_data_dev *)
+		&cd->response_buf[HID_SYSINFO_SENSING_OFFSET];
+
+	scd->electrodes_x = scd_dev->electrodes_x;
+	scd->electrodes_y = scd_dev->electrodes_y;
+	scd->origin_x = scd_dev->origin_x;
+	scd->origin_y = scd_dev->origin_y;
+
+	/* PIP 1.4 (001-82649 *Q) adds the X_IS_TX bit in X_ORG */
+	if (scd->origin_x & 0x02) {
+		scd->tx_num = scd->electrodes_x;
+		scd->rx_num = scd->electrodes_y;
+	} else {
+		scd->tx_num = scd->electrodes_y;
+		scd->rx_num = scd->electrodes_x;
+	}
+
+	scd->panel_id = scd_dev->panel_id;
+	scd->btn = scd_dev->btn;
+	scd->scan_mode = scd_dev->scan_mode;
+	scd->max_tch = scd_dev->max_num_of_tch_per_refresh_cycle;
+
+	scd->res_x = get_unaligned_le16(&scd_dev->res_x);
+	scd->res_y = get_unaligned_le16(&scd_dev->res_y);
+	scd->max_z = get_unaligned_le16(&scd_dev->max_z);
+	scd->len_x = get_unaligned_le16(&scd_dev->len_x);
+	scd->len_y = get_unaligned_le16(&scd_dev->len_y);
+
+	cyttsp5_pr_buf(cd->dev, (u8 *)scd_dev,
+		sizeof(struct cyttsp5_sensing_conf_data_dev),
+		"sensing_conf_data");
+}
+
+static int cyttsp5_si_setup(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_sysinfo *si = &cd->sysinfo;
+	int max_tch = si->sensing_conf_data.max_tch;
+
+	if (!si->xy_data)
+		si->xy_data = kzalloc(max_tch * si->desc.tch_record_size,
+				GFP_KERNEL);
+	if (!si->xy_data)
+		return -ENOMEM;
+
+	if (!si->xy_mode)
+		si->xy_mode = kzalloc(si->desc.tch_header_size, GFP_KERNEL);
+	if (!si->xy_mode) {
+		kfree(si->xy_data);
+		si->xy_data = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
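+/*
+ * Count the buttons advertised in the sysinfo button bitmask and map
+ * each to a key code from the platform CY_IC_GRPNUM_BTN_KEYS table;
+ * buttons beyond the table default to KEY_RESERVED.
+ */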
+static int cyttsp5_si_get_btn_data(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_sysinfo *si = &cd->sysinfo;
+	int num_btns = 0;
+	int num_defined_keys;
+	u16 *key_table;
+	int btn;
+	int i;
+	int rc = 0;
+	unsigned int btns = cd->response_buf[HID_SYSINFO_BTN_OFFSET]
+		& HID_SYSINFO_BTN_MASK;
+	size_t btn_keys_size;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_2, "%s: get btn data\n", __func__);
+
+	for (i = 0; i < HID_SYSINFO_MAX_BTN; i++)
+		if (btns & (1 << i))
+			num_btns++;
+
+	si->num_btns = num_btns;
+
+	if (num_btns) {
+		btn_keys_size = num_btns * sizeof(struct cyttsp5_btn);
+		if (!si->btn)
+			si->btn = kzalloc(btn_keys_size, GFP_KERNEL);
+		if (!si->btn)
+			return -ENOMEM;
+
+		if (cd->cpdata->sett[CY_IC_GRPNUM_BTN_KEYS] == NULL)
+			num_defined_keys = 0;
+		else if (cd->cpdata->sett[CY_IC_GRPNUM_BTN_KEYS]->data == NULL)
+			num_defined_keys = 0;
+		else
+			num_defined_keys = cd->cpdata->sett
+				[CY_IC_GRPNUM_BTN_KEYS]->size;
+
+		for (btn = 0; btn < num_btns && btn < num_defined_keys; btn++) {
+			key_table = (u16 *)cd->cpdata->sett
+				[CY_IC_GRPNUM_BTN_KEYS]->data;
+			si->btn[btn].key_code = key_table[btn];
+			si->btn[btn].enabled = true;
+		}
+		for (; btn < num_btns; btn++) {
+			si->btn[btn].key_code = KEY_RESERVED;
+			si->btn[btn].enabled = true;
+		}
+
+		return rc;
+	}
+
+	kfree(si->btn);
+	si->btn = NULL;
+	return rc;
+}
+
+static void cyttsp5_si_put_log_data(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_sysinfo *si = &cd->sysinfo;
+	struct cyttsp5_cydata *cydata = &si->cydata;
+	struct cyttsp5_sensing_conf_data *scd = &si->sensing_conf_data;
+	int i;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: pip_ver_major =0x%02X (%d)\n",
+		__func__, cydata->pip_ver_major, cydata->pip_ver_major);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: pip_ver_minor =0x%02X (%d)\n",
+		__func__, cydata->pip_ver_minor, cydata->pip_ver_minor);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: fw_pid =0x%04X (%d)\n",
+		__func__, cydata->fw_pid, cydata->fw_pid);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: fw_ver_major =0x%02X (%d)\n",
+		__func__, cydata->fw_ver_major, cydata->fw_ver_major);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: fw_ver_minor =0x%02X (%d)\n",
+		__func__, cydata->fw_ver_minor, cydata->fw_ver_minor);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: revctrl =0x%08X (%d)\n",
+		__func__, cydata->revctrl, cydata->revctrl);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: fw_ver_conf =0x%04X (%d)\n",
+		__func__, cydata->fw_ver_conf, cydata->fw_ver_conf);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: bl_ver_major =0x%02X (%d)\n",
+		__func__, cydata->bl_ver_major, cydata->bl_ver_major);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: bl_ver_minor =0x%02X (%d)\n",
+		__func__, cydata->bl_ver_minor, cydata->bl_ver_minor);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: jtag_id_h =0x%04X (%d)\n",
+		__func__, cydata->jtag_id_h, cydata->jtag_id_h);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: jtag_id_l =0x%04X (%d)\n",
+		__func__, cydata->jtag_id_l, cydata->jtag_id_l);
+
+	for (i = 0; i < CY_NUM_MFGID; i++)
+		parade_debug(cd->dev, DEBUG_LEVEL_1,
+			"%s: mfg_id[%d] =0x%02X (%d)\n",
+			__func__, i, cydata->mfg_id[i], cydata->mfg_id[i]);
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: post_code =0x%04X (%d)\n",
+		__func__, cydata->post_code, cydata->post_code);
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: electrodes_x =0x%02X (%d)\n",
+		__func__, scd->electrodes_x, scd->electrodes_x);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: electrodes_y =0x%02X (%d)\n",
+		__func__, scd->electrodes_y, scd->electrodes_y);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: len_x =0x%04X (%d)\n",
+		__func__, scd->len_x, scd->len_x);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: len_y =0x%04X (%d)\n",
+		__func__, scd->len_y, scd->len_y);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: res_x =0x%04X (%d)\n",
+		__func__, scd->res_x, scd->res_x);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: res_y =0x%04X (%d)\n",
+		__func__, scd->res_y, scd->res_y);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: max_z =0x%04X (%d)\n",
+		__func__, scd->max_z, scd->max_z);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: origin_x =0x%02X (%d)\n",
+		__func__, scd->origin_x, scd->origin_x);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: origin_y =0x%02X (%d)\n",
+		__func__, scd->origin_y, scd->origin_y);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: panel_id =0x%02X (%d)\n",
+		__func__, scd->panel_id, scd->panel_id);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: btn =0x%02X (%d)\n",
+		__func__, scd->btn, scd->btn);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: scan_mode =0x%02X (%d)\n",
+		__func__, scd->scan_mode, scd->scan_mode);
+	parade_debug(cd->dev, DEBUG_LEVEL_1,
+		"%s: max_num_of_tch_per_refresh_cycle =0x%02X (%d)\n",
+		__func__, scd->max_tch, scd->max_tch);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: xy_mode =%pK\n",
+		__func__, si->xy_mode);
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: xy_data =%pK\n",
+		__func__, si->xy_data);
+}
+
+static int cyttsp5_get_sysinfo_regs(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_sysinfo *si = &cd->sysinfo;
+	int rc;
+
+	rc = cyttsp5_si_get_btn_data(cd);
+	if (rc < 0)
+		return rc;
+
+	cyttsp5_si_get_cydata(cd);
+
+	cyttsp5_si_get_sensing_conf_data(cd);
+
+	rc = cyttsp5_si_setup(cd);
+	if (rc < 0)
+		return rc;
+
+	cyttsp5_si_put_log_data(cd);
+
+	si->ready = true;
+	return rc;
+}
+
+static void cyttsp5_free_si_ptrs(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_sysinfo *si = &cd->sysinfo;
+
+	kfree(si->btn);
+	kfree(si->xy_mode);
+	kfree(si->xy_data);
+}
+
+static int cyttsp5_hid_output_get_sysinfo_(struct cyttsp5_core_data *cd)
+{
+	int rc;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_GET_SYSINFO),
+		.timeout_ms = CY_HID_OUTPUT_GET_SYSINFO_TIMEOUT,
+	};
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	rc = cyttsp5_get_sysinfo_regs(cd);
+	if (rc)
+		cyttsp5_free_si_ptrs(cd);
+
+	return rc;
+}
+
+static int cyttsp5_hid_output_get_sysinfo(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_get_sysinfo_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int cyttsp5_hid_output_suspend_scanning_(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_SUSPEND_SCANNING),
+	};
+
+	return cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+}
+
+static int cyttsp5_hid_output_suspend_scanning(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_suspend_scanning_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_suspend_scanning(struct device *dev,
+		int protect)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_suspend_scanning(cd);
+
+	return cyttsp5_hid_output_suspend_scanning_(cd);
+}
+
+static int cyttsp5_hid_output_resume_scanning_(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_RESUME_SCANNING),
+	};
+
+	return cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+}
+
+static int cyttsp5_hid_output_resume_scanning(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_resume_scanning_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_resume_scanning(struct device *dev,
+		int protect)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_resume_scanning(cd);
+
+	return cyttsp5_hid_output_resume_scanning_(cd);
+}
+
+static int cyttsp5_hid_output_get_param_(struct cyttsp5_core_data *cd,
+		u8 param_id, u32 *value)
+{
+	int write_length = 1;
+	u8 param[1] = { param_id };
+	u8 read_param_id;
+	int param_size;
+	u8 *ptr;
+	int rc;
+	int i;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_GET_PARAM),
+		.write_length = write_length,
+		.write_buf = param,
+	};
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	read_param_id = cd->response_buf[5];
+	if (read_param_id != param_id)
+		return -EPROTO;
+
+	param_size = cd->response_buf[6];
+	ptr = &cd->response_buf[7];
+	*value = 0;
+	for (i = 0; i < param_size; i++)
+		*value += ptr[i] << (i * 8);
+	return 0;
+}
+
+static int cyttsp5_hid_output_get_param(struct cyttsp5_core_data *cd,
+		u8 param_id, u32 *value)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_get_param_(cd, param_id, value);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+int _cyttsp5_request_hid_output_get_param(struct device *dev,
+		int protect, u8 param_id, u32 *value)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_get_param(cd, param_id, value);
+
+	return cyttsp5_hid_output_get_param_(cd, param_id, value);
+}
+
+static int cyttsp5_hid_output_set_param_(struct cyttsp5_core_data *cd,
+		u8 param_id, u32 value, u8 size)
+{
+	u8 write_buf[6];
+	u8 *ptr = &write_buf[2];
+	int rc;
+	int i;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_SET_PARAM),
+		.write_buf = write_buf,
+	};
+
+	write_buf[0] = param_id;
+	write_buf[1] = size;
+	for (i = 0; i < size; i++) {
+		ptr[i] = value & 0xFF;
+		value = value >> 8;
+	}
+
+	hid_output.write_length = 2 + size;
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	if (param_id != cd->response_buf[5] || size != cd->response_buf[6])
+		return -EPROTO;
+
+	return 0;
+}
+
+static int cyttsp5_hid_output_set_param(struct cyttsp5_core_data *cd,
+		u8 param_id, u32 value, u8 size)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_set_param_(cd, param_id, value, size);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+int _cyttsp5_request_hid_output_set_param(struct device *dev,
+		int protect, u8 param_id, u32 value, u8 size)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_set_param(cd, param_id, value, size);
+
+	return cyttsp5_hid_output_set_param_(cd, param_id, value, size);
+}
+
+static int cyttsp5_hid_output_enter_easywake_state_(
+		struct cyttsp5_core_data *cd, u8 data, u8 *return_data)
+{
+	int write_length = 1;
+	u8 param[1] = { data };
+	int rc;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_ENTER_EASYWAKE_STATE),
+		.write_length = write_length,
+		.write_buf = param,
+	};
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	*return_data = cd->response_buf[5];
+	return rc;
+}
+
+#ifdef CY_GES_WAKEUP
+static int cyttsp5_hid_output_exit_easywake_state_(
+		struct cyttsp5_core_data *cd, u8 data, u8 *return_data)
+{
+	int write_length = 1;
+	u8 param[1] = { data };
+	int rc;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_EXIT_EASYWAKE_STATE),
+		.write_length = write_length,
+		.write_buf = param,
+	};
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	*return_data = cd->response_buf[5];
+	return rc;
+}
+#endif
+
+static int cyttsp5_hid_output_verify_config_block_crc_(
+		struct cyttsp5_core_data *cd, u8 ebid, u8 *status,
+		u16 *calculated_crc, u16 *stored_crc)
+{
+	int write_length = 1;
+	u8 param[1] = { ebid };
+	u8 *ptr;
+	int rc;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_VERIFY_CONFIG_BLOCK_CRC),
+		.write_length = write_length,
+		.write_buf = param,
+	};
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	ptr = &cd->response_buf[5];
+	*status = ptr[0];
+	*calculated_crc = get_unaligned_le16(&ptr[1]);
+	*stored_crc = get_unaligned_le16(&ptr[3]);
+	return 0;
+}
+
+static int cyttsp5_hid_output_verify_config_block_crc(
+		struct cyttsp5_core_data *cd, u8 ebid, u8 *status,
+		u16 *calculated_crc, u16 *stored_crc)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_verify_config_block_crc_(cd, ebid, status,
+			calculated_crc, stored_crc);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_verify_config_block_crc(
+		struct device *dev, int protect, u8 ebid, u8 *status,
+		u16 *calculated_crc, u16 *stored_crc)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_verify_config_block_crc(cd, ebid,
+				status, calculated_crc, stored_crc);
+
+	return cyttsp5_hid_output_verify_config_block_crc_(cd, ebid,
+			status, calculated_crc, stored_crc);
+}
+
+static int cyttsp5_hid_output_get_config_row_size_(struct cyttsp5_core_data *cd,
+		u16 *row_size)
+{
+	int rc;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_GET_CONFIG_ROW_SIZE),
+	};
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	*row_size = get_unaligned_le16(&cd->response_buf[5]);
+	return 0;
+}
+
+static int cyttsp5_hid_output_get_config_row_size(struct cyttsp5_core_data *cd,
+		u16 *row_size)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_get_config_row_size_(cd, row_size);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_get_config_row_size(struct device *dev,
+		int protect, u16 *row_size)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_get_config_row_size(cd, row_size);
+
+	return cyttsp5_hid_output_get_config_row_size_(cd, row_size);
+}
+
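+/*
+ * Read one block of a config EBID. The response echoes the EBID at
+ * [6] and carries the actual length at [7..8], the data from [10],
+ * and a trailing little-endian CRC immediately after the data.
+ */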
+static int cyttsp5_hid_output_read_conf_block_(struct cyttsp5_core_data *cd,
+		u16 row_number, u16 length, u8 ebid, u8 *read_buf, u16 *crc)
+{
+	int read_ebid;
+	int read_length;
+	int status;
+	int rc;
+	int write_length = 5;
+	u8 write_buf[5];
+	u8 cmd_offset = 0;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_READ_CONF_BLOCK),
+		.write_length = write_length,
+		.write_buf = write_buf,
+	};
+
+	write_buf[cmd_offset++] = LOW_BYTE(row_number);
+	write_buf[cmd_offset++] = HI_BYTE(row_number);
+	write_buf[cmd_offset++] = LOW_BYTE(length);
+	write_buf[cmd_offset++] = HI_BYTE(length);
+	write_buf[cmd_offset++] = ebid;
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	status = cd->response_buf[5];
+	if (status)
+		return -EINVAL;
+
+	read_ebid = cd->response_buf[6];
+	if ((read_ebid != ebid) || (cd->response_buf[9] != 0))
+		return -EPROTO;
+
+	read_length = get_unaligned_le16(&cd->response_buf[7]);
+	/* Copy no more than the caller requested or the device returned */
+	if (read_length < length)
+		length = read_length;
+
+	memcpy(read_buf, &cd->response_buf[10], length);
+	*crc = get_unaligned_le16(&cd->response_buf[read_length + 10]);
+
+	return 0;
+}
+
+static int cyttsp5_hid_output_read_conf_ver_(struct cyttsp5_core_data *cd,
+		u16 *config_ver)
+{
+	int rc;
+	u8 read_buf[CY_TTCONFIG_VERSION_OFFSET + CY_TTCONFIG_VERSION_SIZE];
+	u16 crc;
+
+	rc = cyttsp5_hid_output_read_conf_block_(cd, CY_TTCONFIG_VERSION_ROW,
+			CY_TTCONFIG_VERSION_OFFSET + CY_TTCONFIG_VERSION_SIZE,
+			CY_TCH_PARM_EBID, read_buf, &crc);
+	if (rc)
+		return rc;
+
+	*config_ver = get_unaligned_le16(
+				&read_buf[CY_TTCONFIG_VERSION_OFFSET]);
+
+	return 0;
+}
+
+static int cyttsp5_hid_output_write_conf_block_(struct cyttsp5_core_data *cd,
+		u16 row_number, u16 write_length, u8 ebid, u8 *write_buf,
+		u8 *security_key, u16 *actual_write_len)
+{
+	/* row_number(2) + write_len(2) + ebid(1) + data + key(8) + crc(2) */
+	int full_write_length = 2 + 2 + 1 + write_length + 8 + 2;
+	u8 *full_write_buf;
+	u8 cmd_offset = 0;
+	u16 crc;
+	int status;
+	int rc;
+	int read_ebid;
+	u8 *data;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_WRITE_CONF_BLOCK),
+		.write_length = full_write_length,
+		.timeout_ms = CY_HID_OUTPUT_WRITE_CONF_BLOCK_TIMEOUT,
+	};
+
+	full_write_buf = kzalloc(full_write_length, GFP_KERNEL);
+	if (!full_write_buf)
+		return -ENOMEM;
+
+	hid_output.write_buf = full_write_buf;
+	full_write_buf[cmd_offset++] = LOW_BYTE(row_number);
+	full_write_buf[cmd_offset++] = HI_BYTE(row_number);
+	full_write_buf[cmd_offset++] = LOW_BYTE(write_length);
+	full_write_buf[cmd_offset++] = HI_BYTE(write_length);
+	full_write_buf[cmd_offset++] = ebid;
+	data = &full_write_buf[cmd_offset];
+	memcpy(data, write_buf, write_length);
+	cmd_offset += write_length;
+	memcpy(&full_write_buf[cmd_offset], security_key, 8);
+	cmd_offset += 8;
+	crc = _cyttsp5_compute_crc(data, write_length);
+	full_write_buf[cmd_offset++] = LOW_BYTE(crc);
+	full_write_buf[cmd_offset++] = HI_BYTE(crc);
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		goto exit;
+
+	status = cd->response_buf[5];
+	if (status) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	read_ebid = cd->response_buf[6];
+	if (read_ebid != ebid) {
+		rc = -EPROTO;
+		goto exit;
+	}
+
+	*actual_write_len = get_unaligned_le16(&cd->response_buf[7]);
+
+exit:
+	kfree(full_write_buf);
+	return rc;
+}
+
+static int cyttsp5_hid_output_write_conf_block(struct cyttsp5_core_data *cd,
+		u16 row_number, u16 write_length, u8 ebid, u8 *write_buf,
+		u8 *security_key, u16 *actual_write_len)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_write_conf_block_(cd, row_number, write_length,
+			ebid, write_buf, security_key, actual_write_len);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_write_conf_block(struct device *dev,
+		int protect, u16 row_number, u16 write_length, u8 ebid,
+		u8 *write_buf, u8 *security_key, u16 *actual_write_len)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_write_conf_block(cd, row_number,
+				write_length, ebid, write_buf, security_key,
+				actual_write_len);
+
+	return cyttsp5_hid_output_write_conf_block_(cd, row_number,
+			write_length, ebid, write_buf, security_key,
+			actual_write_len);
+}
+
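+/*
+ * Data structure reads can return fewer bytes than requested; the
+ * loop below advances the read offset and shrinks the remaining
+ * length until the requested range has been copied out.
+ */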
+static int cyttsp5_hid_output_get_data_structure_(
+		struct cyttsp5_core_data *cd, u16 read_offset, u16 read_length,
+		u8 data_id, u8 *status, u8 *data_format, u16 *actual_read_len,
+		u8 *data)
+{
+	int rc;
+	u16 total_read_len = 0;
+	u16 read_len;
+	u16 off_buf = 0;
+	u8 write_buf[5];
+	u8 read_data_id;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_GET_DATA_STRUCTURE),
+		.write_length = 5,
+		.write_buf = write_buf,
+	};
+
+again:
+	write_buf[0] = LOW_BYTE(read_offset);
+	write_buf[1] = HI_BYTE(read_offset);
+	write_buf[2] = LOW_BYTE(read_length);
+	write_buf[3] = HI_BYTE(read_length);
+	write_buf[4] = data_id;
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	if (cd->response_buf[5] != CY_CMD_STATUS_SUCCESS)
+		goto set_status;
+
+	read_data_id = cd->response_buf[6];
+	if (read_data_id != data_id)
+		return -EPROTO;
+
+	read_len = get_unaligned_le16(&cd->response_buf[7]);
+	if (read_len && data) {
+		memcpy(&data[off_buf], &cd->response_buf[10], read_len);
+
+		total_read_len += read_len;
+
+		if (read_len < read_length) {
+			read_offset += read_len;
+			off_buf += read_len;
+			read_length -= read_len;
+			goto again;
+		}
+	}
+
+	if (data_format)
+		*data_format = cd->response_buf[9];
+	if (actual_read_len)
+		*actual_read_len = total_read_len;
+set_status:
+	if (status)
+		*status = cd->response_buf[5];
+
+	return rc;
+}
+
+static int cyttsp5_hid_output_get_data_structure(
+		struct cyttsp5_core_data *cd, u16 read_offset, u16 read_length,
+		u8 data_id, u8 *status, u8 *data_format, u16 *actual_read_len,
+		u8 *data)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_get_data_structure_(cd, read_offset,
+			read_length, data_id, status, data_format,
+			actual_read_len, data);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_get_data_structure(struct device *dev,
+		int protect, u16 read_offset, u16 read_length, u8 data_id,
+		u8 *status, u8 *data_format, u16 *actual_read_len, u8 *data)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_get_data_structure(cd,
+				read_offset, read_length, data_id, status,
+				data_format, actual_read_len, data);
+
+	return cyttsp5_hid_output_get_data_structure_(cd,
+			read_offset, read_length, data_id, status,
+			data_format, actual_read_len, data);
+}
+
+static int cyttsp5_hid_output_run_selftest_(
+		struct cyttsp5_core_data *cd, u8 test_id,
+		u8 write_idacs_to_flash, u8 *status, u8 *summary_result,
+		u8 *results_available)
+{
+	int rc;
+	u8 write_buf[2];
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_RUN_SELF_TEST),
+		.write_length = 2,
+		.write_buf = write_buf,
+		.timeout_ms = CY_HID_OUTPUT_RUN_SELF_TEST_TIMEOUT,
+	};
+
+	write_buf[0] = test_id;
+	write_buf[1] = write_idacs_to_flash;
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	if (status)
+		*status = cd->response_buf[5];
+	if (summary_result)
+		*summary_result = cd->response_buf[6];
+	if (results_available)
+		*results_available = cd->response_buf[7];
+
+	return rc;
+}
+
+static int cyttsp5_hid_output_run_selftest(
+		struct cyttsp5_core_data *cd, u8 test_id,
+		u8 write_idacs_to_flash, u8 *status, u8 *summary_result,
+		u8 *results_available)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_run_selftest_(cd, test_id,
+			write_idacs_to_flash, status, summary_result,
+			results_available);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_run_selftest(struct device *dev,
+		int protect, u8 test_id, u8 write_idacs_to_flash, u8 *status,
+		u8 *summary_result, u8 *results_available)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_run_selftest(cd, test_id,
+				write_idacs_to_flash, status, summary_result,
+				results_available);
+
+	return cyttsp5_hid_output_run_selftest_(cd, test_id,
+			write_idacs_to_flash, status, summary_result,
+			results_available);
+}
+
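+/*
+ * Self test results are fetched in chunks like the data structure
+ * read above; firmware older than PIP 1.3 cannot resume a partial
+ * Auto Shorts read, so the chunk loop is skipped in that case.
+ */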
+static int cyttsp5_hid_output_get_selftest_result_(
+		struct cyttsp5_core_data *cd, u16 read_offset, u16 read_length,
+		u8 test_id, u8 *status, u16 *actual_read_len, u8 *data)
+{
+	int rc;
+	u16 total_read_len = 0;
+	u16 read_len;
+	u16 off_buf = 0;
+	u8 write_buf[5];
+	u8 read_test_id;
+	bool repeat;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_GET_SELF_TEST_RESULT),
+		.write_length = 5,
+		.write_buf = write_buf,
+	};
+
+	/*
+	 * Do not repeat reading for Auto Shorts test
+	 * when PIP version < 1.3
+	 */
+	repeat = IS_PIP_VER_GE(&cd->sysinfo, 1, 3)
+			|| test_id != CY_ST_ID_AUTOSHORTS;
+
+again:
+	write_buf[0] = LOW_BYTE(read_offset);
+	write_buf[1] = HI_BYTE(read_offset);
+	write_buf[2] = LOW_BYTE(read_length);
+	write_buf[3] = HI_BYTE(read_length);
+	write_buf[4] = test_id;
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	if (cd->response_buf[5] != CY_CMD_STATUS_SUCCESS)
+		goto set_status;
+
+	read_test_id = cd->response_buf[6];
+	if (read_test_id != test_id)
+		return -EPROTO;
+
+	read_len = get_unaligned_le16(&cd->response_buf[7]);
+	if (read_len && data) {
+		memcpy(&data[off_buf], &cd->response_buf[10], read_len);
+
+		total_read_len += read_len;
+
+		if (repeat && read_len < read_length) {
+			read_offset += read_len;
+			off_buf += read_len;
+			read_length -= read_len;
+			goto again;
+		}
+	}
+
+	if (actual_read_len)
+		*actual_read_len = total_read_len;
+set_status:
+	if (status)
+		*status = cd->response_buf[5];
+
+	return rc;
+}
+
+static int cyttsp5_hid_output_get_selftest_result(
+		struct cyttsp5_core_data *cd, u16 read_offset, u16 read_length,
+		u8 test_id, u8 *status, u16 *actual_read_len, u8 *data)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_get_selftest_result_(cd, read_offset,
+			read_length, test_id, status, actual_read_len, data);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_get_selftest_result(struct device *dev,
+		int protect, u16 read_offset, u16 read_length, u8 test_id,
+		u8 *status, u16 *actual_read_len, u8 *data)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_get_selftest_result(cd, read_offset,
+				read_length, test_id, status, actual_read_len,
+				data);
+
+	return cyttsp5_hid_output_get_selftest_result_(cd, read_offset,
+			read_length, test_id, status, actual_read_len,
+			data);
+}
+
+static int cyttsp5_hid_output_calibrate_idacs_(struct cyttsp5_core_data *cd,
+		u8 mode, u8 *status)
+{
+	int rc;
+	int write_length = 1;
+	u8 write_buf[1];
+	u8 cmd_offset = 0;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_CALIBRATE_IDACS),
+		.write_length = write_length,
+		.write_buf = write_buf,
+		.timeout_ms = CY_HID_OUTPUT_CALIBRATE_IDAC_TIMEOUT,
+	};
+
+	write_buf[cmd_offset++] = mode;
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	*status = cd->response_buf[5];
+	if (*status)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cyttsp5_hid_output_calibrate_idacs(struct cyttsp5_core_data *cd,
+		u8 mode, u8 *status)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_calibrate_idacs_(cd, mode, status);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_calibrate_idacs(struct device *dev,
+		int protect, u8 mode, u8 *status)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_calibrate_idacs(cd, mode, status);
+
+	return cyttsp5_hid_output_calibrate_idacs_(cd, mode, status);
+}
+
+static int cyttsp5_hid_output_initialize_baselines_(
+		struct cyttsp5_core_data *cd, u8 test_id, u8 *status)
+{
+	int rc;
+	int write_length = 1;
+	u8 write_buf[1];
+	u8 cmd_offset = 0;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_INITIALIZE_BASELINES),
+		.write_length = write_length,
+		.write_buf = write_buf,
+	};
+
+	write_buf[cmd_offset++] = test_id;
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	*status = cd->response_buf[5];
+	if (*status)
+		return -EINVAL;
+
+	return rc;
+}
+
+static int cyttsp5_hid_output_initialize_baselines(struct cyttsp5_core_data *cd,
+		u8 test_id, u8 *status)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_initialize_baselines_(cd, test_id, status);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_initialize_baselines(struct device *dev,
+		int protect, u8 test_id, u8 *status)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_initialize_baselines(cd, test_id,
+				status);
+
+	return cyttsp5_hid_output_initialize_baselines_(cd, test_id, status);
+}
+
+static int cyttsp5_hid_output_exec_panel_scan_(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_EXEC_PANEL_SCAN),
+	};
+
+	return cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+}
+
+static int cyttsp5_hid_output_exec_panel_scan(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_exec_panel_scan_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_exec_panel_scan(struct device *dev,
+		int protect)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_exec_panel_scan(cd);
+
+	return cyttsp5_hid_output_exec_panel_scan_(cd);
+}
+
+/*
+ * @response: set non-NULL only if the full response, including the
+ * header, is required
+ */
+static int cyttsp5_hid_output_retrieve_panel_scan_(
+		struct cyttsp5_core_data *cd, u16 read_offset, u16 read_count,
+		u8 data_id, u8 *response, u8 *config, u16 *actual_read_len,
+		u8 *read_buf)
+{
+	int status;
+	u8 read_data_id;
+	int rc;
+	int write_length = 5;
+	u8 write_buf[5];
+	u8 cmd_offset = 0;
+	u8 data_elem_size;
+	int size;
+	int data_size;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_APP_COMMAND(HID_OUTPUT_RETRIEVE_PANEL_SCAN),
+		.write_length = write_length,
+		.write_buf = write_buf,
+	};
+
+	write_buf[cmd_offset++] = LOW_BYTE(read_offset);
+	write_buf[cmd_offset++] = HI_BYTE(read_offset);
+	write_buf[cmd_offset++] = LOW_BYTE(read_count);
+	write_buf[cmd_offset++] = HI_BYTE(read_count);
+	write_buf[cmd_offset++] = data_id;
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	status = cd->response_buf[5];
+	if (status)
+		return -EINVAL;
+
+	read_data_id = cd->response_buf[6];
+	if (read_data_id != data_id)
+		return -EPROTO;
+
+	size = get_unaligned_le16(&cd->response_buf[0]);
+	*actual_read_len = get_unaligned_le16(&cd->response_buf[7]);
+	*config = cd->response_buf[9];
+
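+	/* The low 3 bits of the config byte give the element size in bytes */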
+	data_elem_size = *config & 0x07;
+	data_size = *actual_read_len * data_elem_size;
+
+	if (read_buf)
+		memcpy(read_buf, &cd->response_buf[10], data_size);
+	if (response)
+		memcpy(response, cd->response_buf, size);
+	return rc;
+}
+
+static int cyttsp5_hid_output_retrieve_panel_scan(
+		struct cyttsp5_core_data *cd, u16 read_offset, u16 read_count,
+		u8 data_id, u8 *response, u8 *config, u16 *actual_read_len,
+		u8 *read_buf)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_retrieve_panel_scan_(cd, read_offset,
+			read_count, data_id, response, config,
+			actual_read_len, read_buf);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_retrieve_panel_scan(struct device *dev,
+		int protect, u16 read_offset, u16 read_count, u8 data_id,
+		u8 *response, u8 *config, u16 *actual_read_len, u8 *read_buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_retrieve_panel_scan(cd,
+				read_offset, read_count, data_id, response,
+				config, actual_read_len, read_buf);
+
+	return cyttsp5_hid_output_retrieve_panel_scan_(cd,
+			read_offset, read_count, data_id, response,
+			config, actual_read_len, read_buf);
+}
+
+static int cyttsp5_hid_output_user_cmd_(struct cyttsp5_core_data *cd,
+		u16 read_len, u8 *read_buf, u16 write_len, u8 *write_buf,
+		u16 *actual_read_len)
+{
+	int rc;
+	u16 size;
+#ifdef TTHE_TUNER_SUPPORT
+	int command_code = 0;
+	int len;
+#endif
+	struct cyttsp5_hid_output hid_output = {
+		.length = write_len,
+		.write_buf = write_buf,
+	};
+
+	rc = cyttsp5_hid_send_output_user_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	size = get_unaligned_le16(&cd->response_buf[0]);
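+	/* Treat a zero-size response as just the bare 2-byte length field */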
+	if (size == 0)
+		size = 2;
+
+	if (size > read_len) {
+		*actual_read_len = 0;
+		return -EINVAL;
+	}
+
+	memcpy(read_buf, cd->response_buf, size);
+	*actual_read_len = size;
+
+#ifdef TTHE_TUNER_SUPPORT
+	/* print up to cmd code */
+	len = HID_OUTPUT_CMD_OFFSET + 1;
+	if (write_len < len)
+		len = write_len;
+	else
+		command_code = write_buf[HID_OUTPUT_CMD_OFFSET]
+			& HID_OUTPUT_CMD_MASK;
+
+	/* Do not print for EXEC_PANEL_SCAN & RETRIEVE_PANEL_SCAN commands */
+	if (command_code != HID_OUTPUT_EXEC_PANEL_SCAN
+			&& command_code != HID_OUTPUT_RETRIEVE_PANEL_SCAN)
+		tthe_print(cd, write_buf, len, "CMD=");
+#endif
+
+	return 0;
+}
+
+static int cyttsp5_get_config_ver_(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_sysinfo *si = &cd->sysinfo;
+	int rc;
+	u16 config_ver = 0;
+
+	rc = cyttsp5_hid_output_suspend_scanning_(cd);
+	if (rc)
+		goto error;
+
+	rc = cyttsp5_hid_output_read_conf_ver_(cd, &config_ver);
+	if (rc)
+		goto exit;
+
+	si->cydata.fw_ver_conf = config_ver;
+
+exit:
+	cyttsp5_hid_output_resume_scanning_(cd);
+error:
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: CONFIG_VER:%04X\n",
+		__func__, config_ver);
+	return rc;
+}
+
+static int cyttsp5_hid_output_user_cmd(struct cyttsp5_core_data *cd,
+		u16 read_len, u8 *read_buf, u16 write_len, u8 *write_buf,
+		u16 *actual_read_len)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_user_cmd_(cd, read_len, read_buf,
+			write_len, write_buf, actual_read_len);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_user_cmd(struct device *dev,
+		int protect, u16 read_len, u8 *read_buf, u16 write_len,
+		u8 *write_buf, u16 *actual_read_len)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_user_cmd(cd, read_len, read_buf,
+				write_len, write_buf, actual_read_len);
+
+	return cyttsp5_hid_output_user_cmd_(cd, read_len, read_buf,
+			write_len, write_buf, actual_read_len);
+}
+
+static int cyttsp5_hid_output_bl_get_information_(struct cyttsp5_core_data *cd,
+		u8 *return_data)
+{
+	int rc;
+	int data_len;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_BL_COMMAND(HID_OUTPUT_BL_GET_INFO),
+	};
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc)
+		return rc;
+
+	data_len = get_unaligned_le16(&cd->input_buf[6]);
+	if (!data_len)
+		return -EPROTO;
+
+	memcpy(return_data, &cd->response_buf[8], data_len);
+
+	return 0;
+}
+
+static int cyttsp5_hid_output_bl_get_information(struct cyttsp5_core_data *cd,
+		u8 *return_data)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_bl_get_information_(cd, return_data);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_bl_get_information(struct device *dev,
+		int protect, u8 *return_data)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_bl_get_information(cd, return_data);
+
+	return cyttsp5_hid_output_bl_get_information_(cd, return_data);
+}
+
+static int cyttsp5_hid_output_bl_initiate_bl_(struct cyttsp5_core_data *cd,
+		u16 key_size, u8 *key_buf, u16 row_size, u8 *metadata_row_buf)
+{
+	u16 write_length = key_size + row_size;
+	u8 *write_buf;
+	int rc;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_BL_COMMAND(HID_OUTPUT_BL_INITIATE_BL),
+		.write_length = write_length,
+		.timeout_ms = CY_HID_OUTPUT_BL_INITIATE_BL_TIMEOUT,
+	};
+
+	write_buf = kzalloc(write_length, GFP_KERNEL);
+	if (!write_buf)
+		return -ENOMEM;
+
+	hid_output.write_buf = write_buf;
+
+	if (key_size)
+		memcpy(write_buf, key_buf, key_size);
+
+	if (row_size)
+		memcpy(&write_buf[key_size], metadata_row_buf, row_size);
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+
+	kfree(write_buf);
+	return rc;
+}
+
+static int cyttsp5_hid_output_bl_initiate_bl(struct cyttsp5_core_data *cd,
+		u16 key_size, u8 *key_buf, u16 row_size, u8 *metadata_row_buf)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_bl_initiate_bl_(cd, key_size, key_buf,
+			row_size, metadata_row_buf);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_bl_initiate_bl(struct device *dev,
+		int protect, u16 key_size, u8 *key_buf, u16 row_size,
+		u8 *metadata_row_buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_bl_initiate_bl(cd, key_size, key_buf,
+				row_size, metadata_row_buf);
+
+	return cyttsp5_hid_output_bl_initiate_bl_(cd, key_size, key_buf,
+			row_size, metadata_row_buf);
+}
+
+static int cyttsp5_hid_output_bl_program_and_verify_(
+		struct cyttsp5_core_data *cd, u16 data_len, u8 *data_buf)
+{
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_BL_COMMAND(HID_OUTPUT_BL_PROGRAM_AND_VERIFY),
+		.write_length = data_len,
+		.write_buf = data_buf,
+		.timeout_ms = CY_HID_OUTPUT_BL_PROGRAM_AND_VERIFY_TIMEOUT,
+	};
+
+	return cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+}
+
+static int cyttsp5_hid_output_bl_program_and_verify(
+		struct cyttsp5_core_data *cd, u16 data_len, u8 *data_buf)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_bl_program_and_verify_(cd, data_len, data_buf);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_bl_program_and_verify(
+		struct device *dev, int protect, u16 data_len, u8 *data_buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_bl_program_and_verify(cd, data_len,
+				data_buf);
+
+	return cyttsp5_hid_output_bl_program_and_verify_(cd, data_len,
+			data_buf);
+}
+
+static int cyttsp5_hid_output_bl_verify_app_integrity_(
+		struct cyttsp5_core_data *cd, u8 *result)
+{
+	int rc;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_BL_COMMAND(HID_OUTPUT_BL_VERIFY_APP_INTEGRITY),
+	};
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc) {
+		*result = 0;
+		return rc;
+	}
+
+	*result = cd->response_buf[8];
+	return 0;
+}
+
+static int cyttsp5_hid_output_bl_verify_app_integrity(
+		struct cyttsp5_core_data *cd, u8 *result)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_bl_verify_app_integrity_(cd, result);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_bl_verify_app_integrity(
+		struct device *dev, int protect, u8 *result)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_bl_verify_app_integrity(cd, result);
+
+	return cyttsp5_hid_output_bl_verify_app_integrity_(cd, result);
+}
+
+static int cyttsp5_hid_output_bl_launch_app_(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_BL_COMMAND(HID_OUTPUT_BL_LAUNCH_APP),
+		.reset_expected = 1,
+	};
+
+	return cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+}
+
+static int cyttsp5_hid_output_bl_launch_app(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_bl_launch_app_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_launch_app(struct device *dev,
+		int protect)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_bl_launch_app(cd);
+
+	return cyttsp5_hid_output_bl_launch_app_(cd);
+}
+
+static int cyttsp5_hid_output_bl_get_panel_id_(
+		struct cyttsp5_core_data *cd, u8 *panel_id)
+{
+	int rc;
+	struct cyttsp5_hid_output hid_output = {
+		HID_OUTPUT_BL_COMMAND(HID_OUTPUT_BL_GET_PANEL_ID),
+	};
+
+	rc = cyttsp5_hid_send_output_and_wait_(cd, &hid_output);
+	if (rc == -EPROTO && cd->response_buf[5] == ERROR_COMMAND) {
+		parade_debug(cd->dev, DEBUG_LEVEL_1,
+			"%s: Get Panel ID command not supported\n",
+			__func__);
+		*panel_id = PANEL_ID_NOT_ENABLED;
+		return 0;
+	} else if (rc < 0) {
+		dev_err(cd->dev, "%s: Error on Get Panel ID command\n",
+			__func__);
+		return rc;
+	}
+
+	*panel_id = cd->response_buf[8];
+	return 0;
+}
+
+static int cyttsp5_hid_output_bl_get_panel_id(
+		struct cyttsp5_core_data *cd, u8 *panel_id)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_hid_output_bl_get_panel_id_(cd, panel_id);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_hid_output_bl_get_panel_id(
+		struct device *dev, int protect, u8 *panel_id)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_hid_output_bl_get_panel_id(cd, panel_id);
+
+	return cyttsp5_hid_output_bl_get_panel_id_(cd, panel_id);
+}
+
+static int cyttsp5_get_hid_descriptor_(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_desc *desc)
+{
+	struct device *dev = cd->dev;
+	int rc;
+	int t;
+	u8 cmd[2];
+
+	/* Read HID descriptor length and version */
+	mutex_lock(&cd->system_lock);
+	cd->hid_cmd_state = 1;
+	mutex_unlock(&cd->system_lock);
+
+	/* Set HID descriptor register */
+	memcpy(cmd, &cd->hid_core.hid_desc_register,
+		sizeof(cd->hid_core.hid_desc_register));
+
+	rc = cyttsp5_adap_write_read_specific(cd, 2, cmd, NULL);
+	if (rc) {
+		dev_err(dev,
+			"%s: get HID descriptor length and ver failed, rc=%d\n",
+			__func__, rc);
+		goto error;
+	}
+
+	t = wait_event_timeout(cd->wait_q, (cd->hid_cmd_state == 0),
+			msecs_to_jiffies(CY_HID_GET_HID_DESCRIPTOR_TIMEOUT));
+	if (IS_TMO(t)) {
+		dev_err(cd->dev, "%s: HID get descriptor timed out\n",
+			__func__);
+		rc = -ETIME;
+		goto error;
+	}
+
+	memcpy((u8 *)desc, cd->response_buf, sizeof(struct cyttsp5_hid_desc));
+
+	/* Check HID descriptor length and version */
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: HID len:%X HID ver:%X\n",
+		__func__,
+		le16_to_cpu(desc->hid_desc_len),
+		le16_to_cpu(desc->bcd_version));
+
+	if (le16_to_cpu(desc->hid_desc_len) != sizeof(*desc) ||
+		le16_to_cpu(desc->bcd_version) != CY_HID_VERSION) {
+		dev_err(dev, "%s: Unsupported HID version\n", __func__);
+		return -ENODEV;
+	}
+
+	goto exit;
+
+error:
+	mutex_lock(&cd->system_lock);
+	cd->hid_cmd_state = 0;
+	mutex_unlock(&cd->system_lock);
+exit:
+	return rc;
+}
+
+static int cyttsp5_get_hid_descriptor(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_desc *desc)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_get_hid_descriptor_(cd, desc);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int _cyttsp5_request_get_hid_desc(struct device *dev, int protect)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (protect)
+		return cyttsp5_get_hid_descriptor(cd, &cd->hid_desc);
+
+	return cyttsp5_get_hid_descriptor_(cd, &cd->hid_desc);
+}
+
+static int cyttsp5_hw_soft_reset(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	if (cd->hid_desc.hid_desc_len == 0) {
+		rc = cyttsp5_get_hid_descriptor_(cd, &cd->hid_desc);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = cyttsp5_hid_cmd_reset_(cd);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: FAILED to execute SOFT reset\n",
+			__func__);
+		return rc;
+	}
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: execute SOFT reset\n",
+		__func__);
+	return 0;
+}
+
+static int cyttsp5_hw_hard_reset(struct cyttsp5_core_data *cd)
+{
+	if (cd->cpdata->xres) {
+		cd->cpdata->xres(cd->cpdata, cd->dev);
+		parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: execute HARD reset\n",
+			__func__);
+		return 0;
+	}
+	dev_err(cd->dev, "%s: FAILED to execute HARD reset\n", __func__);
+	return -ENODEV;
+}
+
+static int cyttsp5_hw_reset(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	mutex_lock(&cd->system_lock);
+	rc = cyttsp5_hw_hard_reset(cd);
+	mutex_unlock(&cd->system_lock);
+	if (rc == -ENODEV)
+		rc = cyttsp5_hw_soft_reset(cd);
+	return rc;
+}
+
+static inline int get_hid_item_data(u8 *data, int item_size)
+{
+	if (item_size == 1)
+		return (int)*data;
+	else if (item_size == 2)
+		return (int)get_unaligned_le16(data);
+	else if (item_size == 4)
+		return (int)get_unaligned_le32(data);
+	return 0;
+}
+
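+/*
+ * parse_report_descriptor() walks standard HID short items: the prefix
+ * byte encodes the data size in bits 0-1 (where a raw value of 3 means
+ * 4 data bytes), the item type in bits 2-3 (main/global/local) and the
+ * tag in bits 4-7, followed by 0, 1, 2 or 4 data bytes.
+ */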
+static int parse_report_descriptor(struct cyttsp5_core_data *cd,
+		u8 *report_desc, size_t len)
+{
+	struct cyttsp5_hid_report *report;
+	struct cyttsp5_hid_field *field;
+	u8 *buf = report_desc;
+	u8 *end = buf + len;
+	int rc = 0;
+	int offset = 0;
+	int i;
+	u8 report_type;
+	u32 up_usage;
+	/* Global items */
+	u8 report_id = 0;
+	u16 usage_page = 0;
+	int report_count = 0;
+	int report_size = 0;
+	int logical_min = 0;
+	int logical_max = 0;
+	/* Local items */
+	u16 usage = 0;
+	/* Main items - Collection stack */
+	u32 collection_usages[CY_HID_MAX_NESTED_COLLECTIONS] = {0};
+	u8 collection_types[CY_HID_MAX_NESTED_COLLECTIONS] = {0};
+	/* First logical collection holds the header, second the touch record */
+	int logical_collection_count = 0;
+	int collection_nest = 0;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_2,
+		"%s: Report descriptor length: %u\n",
+		__func__, (u32)len);
+
+	mutex_lock(&cd->hid_report_lock);
+	cyttsp5_free_hid_reports_(cd);
+
+	while (buf < end) {
+		int item_type;
+		int item_size;
+		int item_tag;
+		u8 *data;
+
+		/* Get Item */
+		item_size = HID_GET_ITEM_SIZE(buf[0]);
+		if (item_size == 3)
+			item_size = 4;
+		item_type = HID_GET_ITEM_TYPE(buf[0]);
+		item_tag = HID_GET_ITEM_TAG(buf[0]);
+
+		data = ++buf;
+		buf += item_size;
+
+		/* Process current item */
+		switch (item_type) {
+		case HID_ITEM_TYPE_GLOBAL:
+			switch (item_tag) {
+			case HID_GLOBAL_ITEM_TAG_REPORT_ID:
+				if (item_size != 1) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				report_id = get_hid_item_data(data, item_size);
+				offset = 0;
+				logical_collection_count = 0;
+				break;
+			case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
+				if (item_size == 0 || item_size == 4) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				usage_page = (u16)get_hid_item_data(data,
+						item_size);
+				break;
+			case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
+				if (item_size == 0) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				logical_min = get_hid_item_data(data,
+						item_size);
+				break;
+			case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
+				if (item_size == 0) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				logical_max = get_hid_item_data(data,
+						item_size);
+				break;
+			case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
+				if (item_size == 0) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				report_count = get_hid_item_data(data,
+						item_size);
+				break;
+			case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
+				if (item_size == 0) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				report_size = get_hid_item_data(data,
+						item_size);
+				break;
+			default:
+				dev_info(cd->dev,
+					"%s: Unrecognized Global tag %d\n",
+					__func__, item_tag);
+			}
+			break;
+		case HID_ITEM_TYPE_LOCAL:
+			switch (item_tag) {
+			case HID_LOCAL_ITEM_TAG_USAGE:
+				if (item_size == 0 || item_size == 4) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				usage = (u16)get_hid_item_data(data,
+						item_size);
+				break;
+			case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
+				if (item_size == 0) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				/* usage_min = */
+				get_hid_item_data(data, item_size);
+				break;
+			case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
+				if (item_size == 0) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				/* usage_max = */
+				get_hid_item_data(data, item_size);
+				break;
+			default:
+				dev_info(cd->dev,
+					"%s: Unrecognized Local tag %d\n",
+					__func__, item_tag);
+			}
+			break;
+		case HID_ITEM_TYPE_MAIN:
+			switch (item_tag) {
+			case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
+				if (item_size != 1) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				if (collection_nest ==
+						CY_HID_MAX_NESTED_COLLECTIONS) {
+					rc = -EINVAL;
+					goto exit;
+				}
+
+				up_usage = usage_page << 16 | usage;
+
+				/* Update collection stack */
+				collection_usages[collection_nest] = up_usage;
+				collection_types[collection_nest] =
+					get_hid_item_data(data, item_size);
+
+				if (collection_types[collection_nest] ==
+						HID_COLLECTION_LOGICAL)
+					logical_collection_count++;
+
+				collection_nest++;
+				break;
+			case HID_MAIN_ITEM_TAG_END_COLLECTION:
+				if (item_size != 0) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				if (--collection_nest < 0) {
+					rc = -EINVAL;
+					goto exit;
+				}
+				break;
+			case HID_MAIN_ITEM_TAG_INPUT:
+				report_type = HID_INPUT_REPORT;
+				goto continue_main_item;
+			case HID_MAIN_ITEM_TAG_OUTPUT:
+				report_type = HID_OUTPUT_REPORT;
+				goto continue_main_item;
+			case HID_MAIN_ITEM_TAG_FEATURE:
+				report_type = HID_FEATURE_REPORT;
+continue_main_item:
+				if (item_size != 1) {
+					rc = -EINVAL;
+					goto exit;
+				}
+
+				up_usage = usage_page << 16 | usage;
+
+				/* Get or create report */
+				report = cyttsp5_get_hid_report_(cd,
+						report_type, report_id, true);
+				if (!report) {
+					rc = -ENOMEM;
+					goto exit;
+				}
+				if (!report->usage_page && collection_nest > 0)
+					report->usage_page =
+						collection_usages
+							[collection_nest - 1];
+
+				/* Create field */
+				field = cyttsp5_create_hid_field_(report);
+				if (!field) {
+					rc = -ENOMEM;
+					goto exit;
+				}
+
+				field->report_count = report_count;
+				field->report_size = report_size;
+				field->size = report_count * report_size;
+				field->offset = offset;
+				field->data_type =
+					get_hid_item_data(data, item_size);
+				field->logical_min = logical_min;
+				field->logical_max = logical_max;
+				field->usage_page = up_usage;
+
+				for (i = 0; i < collection_nest; i++)
+					field->collection_usage_pages
+							[collection_types[i]] =
+						collection_usages[i];
+
+				/* Update report's header or record size */
+				if (logical_collection_count == 1)
+					report->header_size += field->size;
+				else if (logical_collection_count == 2) {
+					field->record_field = true;
+					field->offset -= report->header_size;
+					/* Set record field index */
+					if (report->record_field_index == 0)
+						report->record_field_index =
+							report->num_fields - 1;
+					report->record_size += field->size;
+				}
+
+				report->size += field->size;
+
+				offset += field->size;
+				break;
+			default:
+				dev_info(cd->dev,
+					"%s: Unrecognized Main tag %d\n",
+					__func__, item_tag);
+			}
+
+			/* Reset all local items */
+			usage = 0;
+			break;
+		}
+	}
+
+	if (buf != end) {
+		dev_err(cd->dev, "%s: Report descriptor length invalid\n",
+			__func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (collection_nest) {
+		dev_err(cd->dev, "%s: Unbalanced collection items (%d)\n",
+			__func__, collection_nest);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+exit:
+	if (rc)
+		cyttsp5_free_hid_reports_(cd);
+	mutex_unlock(&cd->hid_report_lock);
+	return rc;
+}
+
+static struct cyttsp5_hid_field *find_report_desc_field(
+		struct cyttsp5_core_data *cd, u32 usage_page,
+		u32 collection_usage_page)
+{
+	struct cyttsp5_hid_report *report = NULL;
+	struct cyttsp5_hid_field *field = NULL;
+	int i;
+	int j;
+	u32 field_cup;
+	u32 field_up;
+
+	for (i = 0; i < cd->num_hid_reports; i++) {
+		report = cd->hid_reports[i];
+		for (j = 0; j < report->num_fields; j++) {
+			field = report->fields[j];
+			field_cup = field->collection_usage_pages
+				[HID_COLLECTION_LOGICAL];
+			field_up = field->usage_page;
+			if (field_cup == collection_usage_page
+					&& field_up == usage_page)
+				return field;
+		}
+	}
+
+	return NULL;
+}
+
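+/*
+ * Convert a descriptor field into byte offset, byte size, bit offset
+ * and max value. For example, a 13-bit field at bit offset 20 yields
+ * ofs = 2, size = 2, bofs = 4 and max = 8192 (1 << 13).
+ */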
+static int fill_tch_abs(struct cyttsp5_tch_abs_params *tch_abs,
+		struct cyttsp5_hid_field *field)
+{
+	tch_abs->ofs = field->offset / 8;
+	tch_abs->size = field->report_size / 8;
+	if (field->report_size % 8)
+		tch_abs->size += 1;
+	tch_abs->min = 0;
+	tch_abs->max = 1 << field->report_size;
+	tch_abs->bofs = field->offset - (tch_abs->ofs << 3);
+
+	return 0;
+}
+
+static struct cyttsp5_hid_report *find_report_desc(struct cyttsp5_core_data *cd,
+		u32 usage_page)
+{
+	struct cyttsp5_hid_report *report = NULL;
+	int i;
+
+	for (i = 0; i < cd->num_hid_reports; i++)
+		if (cd->hid_reports[i]->usage_page == usage_page) {
+			report = cd->hid_reports[i];
+			break;
+		}
+
+	return report;
+}
+
+static int setup_report_descriptor(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_sysinfo *si = &cd->sysinfo;
+	struct cyttsp5_hid_report *report;
+	struct cyttsp5_hid_field *field;
+	int i;
+	u32 tch_collection_usage_page = HID_CY_TCH_COL_USAGE_PG;
+	u32 btn_collection_usage_page = HID_CY_BTN_COL_USAGE_PG;
+
+	for (i = CY_TCH_X; i < CY_TCH_NUM_ABS; i++) {
+		field = find_report_desc_field(cd,
+				cyttsp5_tch_abs_field_map[i],
+				tch_collection_usage_page);
+		if (field) {
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				" Field %pK: rep_cnt:%d rep_sz:%d off:%d",
+				field, field->report_count, field->report_size,
+				field->offset);
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				" data:%02X min:%d max:%d usage_page:%08X\n",
+				field->data_type, field->logical_min,
+				field->logical_max, field->usage_page);
+			fill_tch_abs(&si->tch_abs[i], field);
+			si->tch_abs[i].report = 1;
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				"%s: ofs:%u size:%u min:%u",
+				cyttsp5_tch_abs_string[i],
+				(u32)si->tch_abs[i].ofs,
+				(u32)si->tch_abs[i].size,
+				(u32)si->tch_abs[i].min);
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				" max:%u bofs:%u report:%d",
+				(u32)si->tch_abs[i].max,
+				(u32)si->tch_abs[i].bofs,
+				si->tch_abs[i].report);
+
+		} else {
+			si->tch_abs[i].report = 0;
+		}
+	}
+	for (i = CY_TCH_TIME; i < CY_TCH_NUM_HDR; i++) {
+		field = find_report_desc_field(cd,
+				cyttsp5_tch_hdr_field_map[i],
+				tch_collection_usage_page);
+		if (field) {
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				" Field %pK: rep_cnt:%d rep_sz:%d off:%d",
+				field, field->report_count, field->report_size,
+				field->offset);
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				" data:%02X min:%d max:%d usage_page:%08X\n",
+				field->data_type, field->logical_min,
+				field->logical_max, field->usage_page);
+			fill_tch_abs(&si->tch_hdr[i], field);
+			si->tch_hdr[i].report = 1;
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				"%s: ofs:%u size:%u min:%u",
+				cyttsp5_tch_hdr_string[i],
+				(u32)si->tch_hdr[i].ofs,
+				(u32)si->tch_hdr[i].size,
+				(u32)si->tch_hdr[i].min);
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				" max:%u bofs:%u report:%d",
+				(u32)si->tch_hdr[i].max,
+				(u32)si->tch_hdr[i].bofs,
+				si->tch_hdr[i].report);
+
+		} else {
+			si->tch_hdr[i].report = 0;
+		}
+	}
+
+	report = find_report_desc(cd, tch_collection_usage_page);
+	if (report) {
+		si->desc.tch_report_id = report->id;
+		si->desc.tch_record_size = report->record_size / 8;
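+		/*
+		 * The +3 presumably covers the 2-byte report length and
+		 * 1-byte report ID that prefix each input report.
+		 */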
+		si->desc.tch_header_size = (report->header_size / 8) + 3;
+	} else {
+		si->desc.tch_report_id = HID_TOUCH_REPORT_ID;
+		si->desc.tch_record_size = TOUCH_REPORT_SIZE;
+		si->desc.tch_header_size = TOUCH_INPUT_HEADER_SIZE;
+	}
+
+	report = find_report_desc(cd, btn_collection_usage_page);
+	if (report)
+		si->desc.btn_report_id = report->id;
+	else
+		si->desc.btn_report_id = HID_BTN_REPORT_ID;
+
+	for (i = 0; i < cd->num_hid_reports; i++) {
+		struct cyttsp5_hid_report *report = cd->hid_reports[i];
+
+		switch (report->id) {
+		case HID_WAKEUP_REPORT_ID:
+			cd->features.easywake = 1;
+			break;
+		case HID_NOISE_METRIC_REPORT_ID:
+			cd->features.noise_metric = 1;
+			break;
+		case HID_TRACKING_HEATMAP_REPOR_ID:
+			cd->features.tracking_heatmap = 1;
+			break;
+		case HID_SENSOR_DATA_REPORT_ID:
+			cd->features.sensor_data = 1;
+			break;
+		default:
+			break;
+		}
+	}
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1,
+		"Features: easywake:%d noise_metric:%d",
+		cd->features.easywake, cd->features.noise_metric);
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1,
+		" tracking_heatmap:%d sensor_data:%d\n",
+		cd->features.tracking_heatmap, cd->features.sensor_data);
+	return 0;
+}
+
+static int cyttsp5_get_report_descriptor_(struct cyttsp5_core_data *cd)
+{
+	struct device *dev = cd->dev;
+	u8 cmd[2];
+	int rc;
+	int t;
+
+	/* Read report descriptor length and version */
+	mutex_lock(&cd->system_lock);
+	cd->hid_cmd_state = 1;
+	mutex_unlock(&cd->system_lock);
+
+	/* Set report descriptor register */
+	memcpy(cmd, &cd->hid_desc.report_desc_register,
+		sizeof(cd->hid_desc.report_desc_register));
+
+	rc = cyttsp5_adap_write_read_specific(cd, 2, cmd, NULL);
+	if (rc) {
+		dev_err(dev,
+			"%s: get HID descriptor len and ver failed, rc=%d\n",
+			__func__, rc);
+		goto error;
+	}
+
+	t = wait_event_timeout(cd->wait_q, (cd->hid_cmd_state == 0),
+		msecs_to_jiffies(CY_HID_GET_REPORT_DESCRIPTOR_TIMEOUT));
+	if (IS_TMO(t)) {
+		dev_err(cd->dev, "%s: HID get descriptor timed out\n",
+			__func__);
+		rc = -ETIME;
+		goto error;
+	}
+
+	cyttsp5_pr_buf(cd->dev, cd->response_buf,
+		cd->hid_core.hid_report_desc_len, "Report Desc");
+
+	rc = parse_report_descriptor(cd, cd->response_buf + 3,
+		get_unaligned_le16(&cd->response_buf[0]) - 3);
+	if (rc)
+		dev_err(cd->dev, "%s: Error parsing report descriptor r=%d\n",
+			__func__, rc);
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1,
+		"%s: %d reports found in descriptor\n",
+		__func__, cd->num_hid_reports);
+
+	for (t = 0; t < cd->num_hid_reports; t++) {
+		struct cyttsp5_hid_report *report = cd->hid_reports[t];
+		int j;
+
+		parade_debug(cd->dev, DEBUG_LEVEL_2,
+			"Report %d: type:%d id:%02X size:%d fields:%d",
+			t, report->type, report->id,
+			report->size, report->num_fields);
+
+		parade_debug(cd->dev, DEBUG_LEVEL_2,
+			"rec_fld_index:%d hdr_sz:%d rec_sz:%d usage_page:%X\n",
+			report->record_field_index, report->header_size,
+			report->record_size, report->usage_page);
+
+		for (j = 0; j < report->num_fields; j++) {
+			struct cyttsp5_hid_field *field = report->fields[j];
+
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				" Field %d: rep_cnt:%d rep_sz:%d off:%d",
+				j, field->report_count, field->report_size,
+				field->offset);
+
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				" data:%02X min:%d max:%d usage_page:%08X\n",
+				field->data_type, field->logical_min,
+				field->logical_max, field->usage_page);
+
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				"  Collections Phys:%08X App:%08X Log:%08X\n",
+				field->collection_usage_pages
+					[HID_COLLECTION_PHYSICAL],
+				field->collection_usage_pages
+					[HID_COLLECTION_APPLICATION],
+				field->collection_usage_pages
+					[HID_COLLECTION_LOGICAL]);
+		}
+	}
+
+	rc = setup_report_descriptor(cd);
+
+	/* Free it for now */
+	cyttsp5_free_hid_reports_(cd);
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1,
+		"%s: %d reports found in descriptor\n",
+		__func__, cd->num_hid_reports);
+
+	goto exit;
+
+error:
+	mutex_lock(&cd->system_lock);
+	cd->hid_cmd_state = 0;
+	mutex_unlock(&cd->system_lock);
+exit:
+	return rc;
+}
+
+static int cyttsp5_get_mode(struct cyttsp5_core_data *cd,
+		struct cyttsp5_hid_desc *desc)
+{
+	if (desc->packet_id == CY_HID_APP_REPORT_ID)
+		return CY_MODE_OPERATIONAL;
+	else if (desc->packet_id == CY_HID_BL_REPORT_ID)
+		return CY_MODE_BOOTLOADER;
+
+	return CY_MODE_UNKNOWN;
+}
+
+static int _cyttsp5_request_get_mode(struct device *dev, int protect, u8 *mode)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	int rc;
+
+	if (protect)
+		rc = cyttsp5_get_hid_descriptor(cd, &cd->hid_desc);
+	else
+		rc = cyttsp5_get_hid_descriptor_(cd, &cd->hid_desc);
+
+	if (rc)
+		*mode = CY_MODE_UNKNOWN;
+	else
+		*mode = cyttsp5_get_mode(cd, &cd->hid_desc);
+
+	return rc;
+}
+
+static void cyttsp5_queue_startup_(struct cyttsp5_core_data *cd)
+{
+	if (cd->startup_state == STARTUP_NONE) {
+		cd->startup_state = STARTUP_QUEUED;
+		schedule_work(&cd->startup_work);
+		dev_info(cd->dev, "%s: cyttsp5_startup queued\n", __func__);
+	} else {
+		parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: startup_state = %d\n",
+			__func__, cd->startup_state);
+	}
+}
+
+static void cyttsp5_queue_startup(struct cyttsp5_core_data *cd)
+{
+	mutex_lock(&cd->system_lock);
+	cyttsp5_queue_startup_(cd);
+	mutex_unlock(&cd->system_lock);
+}
+
+static void call_atten_cb(struct cyttsp5_core_data *cd,
+		enum cyttsp5_atten_type type, int mode)
+{
+	struct atten_node *atten, *atten_n;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_2, "%s: check list type=%d mode=%d\n",
+		__func__, type, mode);
+	spin_lock(&cd->spinlock);
+	list_for_each_entry_safe(atten, atten_n,
+			&cd->atten_list[type], node)
+		if (!mode || atten->mode & mode) {
+			spin_unlock(&cd->spinlock);
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				"%s: attention for '%s'",
+				__func__, dev_name(atten->dev));
+			atten->func(atten->dev);
+			spin_lock(&cd->spinlock);
+		}
+
+	spin_unlock(&cd->spinlock);
+}
+
+static void cyttsp5_start_wd_timer(struct cyttsp5_core_data *cd)
+{
+	if (!cd->watchdog_interval)
+		return;
+
+	mod_timer(&cd->watchdog_timer, jiffies +
+			msecs_to_jiffies(cd->watchdog_interval));
+}
+
+static void cyttsp5_stop_wd_timer(struct cyttsp5_core_data *cd)
+{
+	if (!cd->watchdog_interval)
+		return;
+
+	/*
+	 * Ensure we wait until the watchdog timer
+	 * running on a different CPU finishes
+	 */
+	del_timer_sync(&cd->watchdog_timer);
+	cancel_work_sync(&cd->watchdog_work);
+	del_timer_sync(&cd->watchdog_timer);
+}
+
+static int start_fw_upgrade(void *data)
+{
+	struct cyttsp5_core_data *cd = (struct cyttsp5_core_data *)data;
+
+	call_atten_cb(cd, CY_ATTEN_LOADER, 0);
+	return 0;
+}
+
+static void cyttsp5_watchdog_work(struct work_struct *work)
+{
+	struct cyttsp5_core_data *cd =
+			container_of(work, struct cyttsp5_core_data,
+					watchdog_work);
+	int rc;
+	/*
+	 * Fix for CDT207254: if the current sleep_state is SS_SLEEPING,
+	 * there is no need to request exclusive access; return directly.
+	 */
+	if (cd->sleep_state == SS_SLEEPING)
+		return;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		goto queue_startup;
+	}
+
+	rc = cyttsp5_hid_output_null_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+
+queue_startup:
+	if (rc) {
+		dev_err(cd->dev,
+			"%s: failed to access device in watchdog timer r=%d\n",
+			__func__, rc);
+
+		/* Already tried FW upgrade because of watchdog but failed */
+		if (cd->startup_retry_count > CY_WATCHDOG_RETRY_COUNT)
+			return;
+
+		if (cd->startup_retry_count++ < CY_WATCHDOG_RETRY_COUNT)
+			cyttsp5_queue_startup(cd);
+		else
+			kthread_run(start_fw_upgrade, cd, "cyttsp5_loader");
+
+		return;
+	}
+
+	cyttsp5_start_wd_timer(cd);
+}
+
+static void cyttsp5_watchdog_timer(unsigned long handle)
+{
+	struct cyttsp5_core_data *cd = (struct cyttsp5_core_data *)handle;
+
+	if (!cd)
+		return;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_2, "%s: Watchdog timer triggered\n",
+		__func__);
+
+	if (!work_pending(&cd->watchdog_work))
+		schedule_work(&cd->watchdog_work);
+}
+
+static int cyttsp5_put_device_into_easy_wakeup_(struct cyttsp5_core_data *cd)
+{
+	int rc;
+	u8 status = 0;
+
+	mutex_lock(&cd->system_lock);
+	cd->wait_until_wake = 0;
+	mutex_unlock(&cd->system_lock);
+
+	rc = cyttsp5_hid_output_enter_easywake_state_(cd,
+			cd->easy_wakeup_gesture, &status);
+	if (rc || status == 0)
+		return -EBUSY;
+
+	return rc;
+}
+
+#ifdef CY_GES_WAKEUP
+static int cyttsp5_core_wake_device_from_easy_wakeup_(
+		struct cyttsp5_core_data *cd)
+{
+	int rc;
+	u8 status = 0;
+
+	rc = cyttsp5_hid_output_exit_easywake_state_(cd,
+			cd->easy_wakeup_gesture, &status);
+	if (rc || status == 0) {
+		dev_err(cd->dev, "%s: failed, rc=%d, status=%d\n",
+			__func__, rc, status);
+		return -EBUSY;
+	}
+
+	return rc;
+}
+#endif
+
+static int cyttsp5_put_device_into_deep_sleep_(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = cyttsp5_hid_cmd_set_power_(cd, HID_POWER_SLEEP);
+	if (rc)
+		rc = -EBUSY;
+	return rc;
+}
+
+static int cyttsp5_put_device_into_sleep_(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	if (IS_DEEP_SLEEP_CONFIGURED(cd->easy_wakeup_gesture))
+		rc = cyttsp5_put_device_into_deep_sleep_(cd);
+	else
+		rc = cyttsp5_put_device_into_easy_wakeup_(cd);
+
+	return rc;
+}
+
+static int cyttsp5_core_poweroff_device_(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	if (cd->irq_enabled) {
+		cd->irq_enabled = false;
+		disable_irq_nosync(cd->irq);
+	}
+
+	rc = cd->cpdata->power(cd->cpdata, 0, cd->dev, NULL);
+	if (rc < 0)
+		dev_err(cd->dev, "%s: HW Power down fails r=%d\n",
+			__func__, rc);
+	return rc;
+}
+
+static int cyttsp5_core_sleep_(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	mutex_lock(&cd->system_lock);
+	if (cd->sleep_state == SS_SLEEP_OFF) {
+		cd->sleep_state = SS_SLEEPING;
+	} else {
+		mutex_unlock(&cd->system_lock);
+		return 1;
+	}
+	mutex_unlock(&cd->system_lock);
+
+	/* Ensure watchdog and startup works stopped */
+	cyttsp5_stop_wd_timer(cd);
+	cancel_work_sync(&cd->startup_work);
+	cyttsp5_stop_wd_timer(cd);
+
+	if (cd->cpdata->flags & CY_CORE_FLAG_POWEROFF_ON_SLEEP)
+		rc = cyttsp5_core_poweroff_device_(cd);
+	else
+		rc = cyttsp5_put_device_into_sleep_(cd);
+
+	mutex_lock(&cd->system_lock);
+	cd->sleep_state = SS_SLEEP_ON;
+	mutex_unlock(&cd->system_lock);
+
+	return rc;
+}
+
+static int cyttsp5_core_sleep(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_core_sleep_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+	else
+		parade_debug(cd->dev, DEBUG_LEVEL_2,
+			"%s: pass release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int cyttsp5_wakeup_host(struct cyttsp5_core_data *cd)
+{
+#ifndef EASYWAKE_TSG6
+	/* TSG5 EasyWake */
+	int rc = 0;
+	int event_id;
+	int size = get_unaligned_le16(&cd->input_buf[0]);
+
+	/* Validate report */
+	if (size != 4 || cd->input_buf[2] != 4)
+		rc = -EINVAL;
+
+	cd->wake_initiated_by_device = 1;
+	event_id = cd->input_buf[3];
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: e=%d, rc=%d\n",
+		__func__, event_id, rc);
+
+	if (rc) {
+		cyttsp5_core_sleep_(cd);
+		goto exit;
+	}
+
+	/* attention WAKE */
+	call_atten_cb(cd, CY_ATTEN_WAKE, 0);
+exit:
+	return rc;
+#else
+	/* TSG6 FW1.3 EasyWake */
+	int rc = 0;
+	int i = 0;
+	int report_length;
+
+	/* Validate report */
+	if (cd->input_buf[2] != 4)
+		rc = -EINVAL;
+
+	cd->wake_initiated_by_device = 1;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: rc=%d\n", __func__, rc);
+
+	if (rc) {
+		cyttsp5_core_sleep_(cd);
+		goto exit;
+	}
+
+	/* Get gesture id and gesture data length */
+	cd->gesture_id = cd->input_buf[3];
+	report_length = (cd->input_buf[1] << 8) | (cd->input_buf[0]);
+	cd->gesture_data_length = report_length - 4;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1,
+		"%s: gesture_id = %d, gesture_data_length = %d\n",
+		__func__, cd->gesture_id, cd->gesture_data_length);
+
+	for (i = 0; i < cd->gesture_data_length; i++)
+		cd->gesture_data[i] = cd->input_buf[4 + i];
+
+	/* attention WAKE */
+	call_atten_cb(cd, CY_ATTEN_WAKE, 0);
+exit:
+	return rc;
+#endif
+}
+
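+/*
+ * Extract one touch field from raw report bytes. For example, a 12-bit
+ * coordinate at bofs 0 spanning two bytes comes out as
+ * axis = data[0] + (data[1] << 8), masked to 0..4095 by (max - 1).
+ */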
+static void cyttsp5_get_touch_axis(struct cyttsp5_core_data *cd,
+	int *axis, int size, int max, u8 *data, int bofs)
+{
+	int nbyte;
+	int next;
+
+	for (nbyte = 0, *axis = 0, next = 0; nbyte < size; nbyte++) {
+		*axis = *axis + ((data[next] >> bofs) << (nbyte * 8));
+		next++;
+	}
+
+	*axis &= max - 1;
+}
+
+static int move_tracking_hetmap_data(struct cyttsp5_core_data *cd,
+	struct cyttsp5_sysinfo *si)
+{
+#ifdef TTHE_TUNER_SUPPORT
+	int size = get_unaligned_le16(&cd->input_buf[0]);
+
+	if (size)
+		tthe_print(cd, cd->input_buf, size, "THM=");
+#endif
+	memcpy(si->xy_mode, cd->input_buf, SENSOR_HEADER_SIZE);
+	return 0;
+}
+
+static int move_sensor_data(struct cyttsp5_core_data *cd,
+	struct cyttsp5_sysinfo *si)
+{
+#ifdef TTHE_TUNER_SUPPORT
+	int size = get_unaligned_le16(&cd->input_buf[0]);
+
+	if (size)
+		tthe_print(cd, cd->input_buf, size, "sensor_monitor=");
+#endif
+	memcpy(si->xy_mode, cd->input_buf, SENSOR_HEADER_SIZE);
+	return 0;
+}
+
+static int move_button_data(struct cyttsp5_core_data *cd,
+	struct cyttsp5_sysinfo *si)
+{
+#ifdef TTHE_TUNER_SUPPORT
+	int size = get_unaligned_le16(&cd->input_buf[0]);
+
+	if (size)
+		tthe_print(cd, cd->input_buf, size, "OpModeData=");
+#endif
+	memcpy(si->xy_mode, cd->input_buf, BTN_INPUT_HEADER_SIZE);
+	cyttsp5_pr_buf(cd->dev, (u8 *)si->xy_mode, BTN_INPUT_HEADER_SIZE,
+			"xy_mode");
+
+	memcpy(si->xy_data, &cd->input_buf[BTN_INPUT_HEADER_SIZE],
+			BTN_REPORT_SIZE);
+	cyttsp5_pr_buf(cd->dev, (u8 *)si->xy_data, BTN_REPORT_SIZE, "xy_data");
+	return 0;
+}
+
+static int move_touch_data(struct cyttsp5_core_data *cd,
+	struct cyttsp5_sysinfo *si)
+{
+	int max_tch = si->sensing_conf_data.max_tch;
+	int num_cur_tch;
+	int length;
+	struct cyttsp5_tch_abs_params *tch = &si->tch_hdr[CY_TCH_NUM];
+#ifdef TTHE_TUNER_SUPPORT
+	int size = get_unaligned_le16(&cd->input_buf[0]);
+
+	if (size)
+		tthe_print(cd, cd->input_buf, size, "OpModeData=");
+#endif
+
+	memcpy(si->xy_mode, cd->input_buf, si->desc.tch_header_size);
+	cyttsp5_pr_buf(cd->dev, (u8 *)si->xy_mode, si->desc.tch_header_size,
+			"xy_mode");
+
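+	/*
+	 * The touch count lives in the header: skip the 2-byte length and
+	 * 1-byte report ID (+3) before applying the field offset.
+	 */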
+	cyttsp5_get_touch_axis(cd, &num_cur_tch, tch->size,
+			tch->max, si->xy_mode + 3 + tch->ofs, tch->bofs);
+	if (unlikely(num_cur_tch > max_tch))
+		num_cur_tch = max_tch;
+
+	length = num_cur_tch * si->desc.tch_record_size;
+
+	memcpy(si->xy_data, &cd->input_buf[si->desc.tch_header_size], length);
+	cyttsp5_pr_buf(cd->dev, (u8 *)si->xy_data, length, "xy_data");
+	return 0;
+}
+
+static int parse_touch_input(struct cyttsp5_core_data *cd, int size)
+{
+	struct cyttsp5_sysinfo *si = &cd->sysinfo;
+	int report_id = cd->input_buf[2];
+	int rc = -EINVAL;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_2, "%s: Received touch report\n",
+		__func__);
+	if (!si->ready) {
+		dev_err(cd->dev,
+			"%s: Need system information to parse touches\n",
+			__func__);
+		return 0;
+	}
+
+	if (!si->xy_mode || !si->xy_data)
+		return rc;
+
+	if (report_id == si->desc.tch_report_id)
+		rc = move_touch_data(cd, si);
+	else if (report_id == si->desc.btn_report_id)
+		rc = move_button_data(cd, si);
+	else if (report_id == HID_SENSOR_DATA_REPORT_ID)
+		rc = move_sensor_data(cd, si);
+	else if (report_id == HID_TRACKING_HEATMAP_REPOR_ID)
+		rc = move_tracking_hetmap_data(cd, si);
+
+	if (rc)
+		return rc;
+
+	/* attention IRQ */
+	call_atten_cb(cd, CY_ATTEN_IRQ, cd->mode);
+
+	return 0;
+}
+
+static int parse_command_input(struct cyttsp5_core_data *cd, int size)
+{
+	parade_debug(cd->dev, DEBUG_LEVEL_2, "%s: Received cmd interrupt\n",
+		__func__);
+
+	memcpy(cd->response_buf, cd->input_buf, size);
+
+	mutex_lock(&cd->system_lock);
+	cd->hid_cmd_state = 0;
+	mutex_unlock(&cd->system_lock);
+	wake_up(&cd->wait_q);
+
+	return 0;
+}
+
+static int cyttsp5_parse_input(struct cyttsp5_core_data *cd)
+{
+	int report_id;
+	int is_command = 0;
+	int size;
+
+	size = get_unaligned_le16(&cd->input_buf[0]);
+
+	/* check reset */
+	if (size == 0) {
+		parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: Reset complete\n",
+			__func__);
+		memcpy(cd->response_buf, cd->input_buf, 2);
+		mutex_lock(&cd->system_lock);
+		if (!cd->hid_reset_cmd_state && !cd->hid_cmd_state) {
+			mutex_unlock(&cd->system_lock);
+			parade_debug(cd->dev, DEBUG_LEVEL_1,
+				"%s: Device Initiated Reset\n", __func__);
+			return 0;
+		}
+
+		cd->hid_reset_cmd_state = 0;
+		if (cd->hid_cmd_state == HID_OUTPUT_START_BOOTLOADER + 1
+			|| cd->hid_cmd_state == HID_OUTPUT_BL_LAUNCH_APP + 1
+			|| cd->hid_cmd_state == HID_OUTPUT_USER_CMD + 1)
+			cd->hid_cmd_state = 0;
+		wake_up(&cd->wait_q);
+		mutex_unlock(&cd->system_lock);
+		return 0;
+	} else if (size == 2 || size >= CY_PIP_1P7_EMPTY_BUF)
+		/*
+		 * Before PIP 1.7, empty buffer is 0x0002;
+		 * From PIP 1.7, empty buffer is 0xFFXX
+		 */
+		return 0;
+
+	report_id = cd->input_buf[2];
+	parade_debug(cd->dev, DEBUG_LEVEL_2, "%s: report_id:%X\n",
+		__func__, report_id);
+
+	/* Check wake-up report */
+	if (report_id == HID_WAKEUP_REPORT_ID) {
+		cyttsp5_wakeup_host(cd);
+		return 0;
+	}
+
+	/* update watchdog expire time */
+	mod_timer_pending(&cd->watchdog_timer, jiffies +
+			msecs_to_jiffies(cd->watchdog_interval));
+
+	if (report_id != cd->sysinfo.desc.tch_report_id
+			&& report_id != cd->sysinfo.desc.btn_report_id
+			&& report_id != HID_SENSOR_DATA_REPORT_ID
+			&& report_id != HID_TRACKING_HEATMAP_REPOR_ID)
+		is_command = 1;
+
+	if (size > CY_MAX_INPUT)
+		size = CY_MAX_INPUT;
+
+	if (unlikely(is_command)) {
+		parse_command_input(cd, size);
+		return 0;
+	}
+	parse_touch_input(cd, size);
+	return 0;
+}
+
+static int cyttsp5_read_input(struct cyttsp5_core_data *cd)
+{
+	struct device *dev = cd->dev;
+	int rc;
+	int t;
+
+	/*
+	 * Workaround for CDT170960 (easywake failure): on an easywake
+	 * interrupt, wait for the bus controller to wake first.
+	 */
+	mutex_lock(&cd->system_lock);
+	if (!IS_DEEP_SLEEP_CONFIGURED(cd->easy_wakeup_gesture)) {
+		if (cd->sleep_state == SS_SLEEP_ON) {
+			mutex_unlock(&cd->system_lock);
+			if (!dev->power.is_suspended)
+				goto read;
+			t = wait_event_timeout(cd->wait_q,
+					(cd->wait_until_wake == 1),
+					msecs_to_jiffies(2000));
+			if (IS_TMO(t))
+				cyttsp5_queue_startup(cd);
+			goto read;
+		}
+	}
+	mutex_unlock(&cd->system_lock);
+
+read:
+	rc = cyttsp5_adap_read_default_nosize(cd, cd->input_buf, CY_MAX_INPUT);
+	if (rc) {
+		dev_err(dev, "%s: Error getting report, r=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: Read input successfully\n",
+		__func__);
+	return rc;
+}
+
+static bool cyttsp5_check_irq_asserted(struct cyttsp5_core_data *cd)
+{
+#ifdef ENABLE_WORKAROUND_FOR_GLITCH_AFTER_BL_LAUNCH_APP
+	/*
+	 * Workaround for FW defect, CDT165308
+	 * bl_launch app creates a glitch in IRQ line
+	 */
+	if (cd->hid_cmd_state == HID_OUTPUT_BL_LAUNCH_APP + 1
+			&& cd->cpdata->irq_stat){
+		/*
+		 * On the X1S and GC1546 panels the INT glitch is about 4us
+		 * wide, while a normal INT response lasts more than 200us,
+		 * so a 10us delay is enough to distinguish the glitch from
+		 * a normal INT.
+		 */
+		udelay(10);
+		if (cd->cpdata->irq_stat(cd->cpdata, cd->dev)
+			!= CY_IRQ_ASSERTED_VALUE)
+			return false;
+	}
+#endif
+	return true;
+}
+
+static irqreturn_t cyttsp5_irq(int irq, void *handle)
+{
+	struct cyttsp5_core_data *cd = handle;
+	int rc;
+
+	if (!cyttsp5_check_irq_asserted(cd))
+		return IRQ_HANDLED;
+
+	rc = cyttsp5_read_input(cd);
+	if (!rc)
+		cyttsp5_parse_input(cd);
+
+	return IRQ_HANDLED;
+}
+
+int _cyttsp5_subscribe_attention(struct device *dev,
+		enum cyttsp5_atten_type type, char *id,
+		int (*func)(struct device *), int mode)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct atten_node *atten, *atten_new;
+
+	atten_new = kzalloc(sizeof(*atten_new), GFP_KERNEL);
+	if (!atten_new)
+		return -ENOMEM;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s from '%s'\n", __func__,
+		dev_name(cd->dev));
+
+	spin_lock(&cd->spinlock);
+	list_for_each_entry(atten, &cd->atten_list[type], node)
+		if (atten->id == id && atten->mode == mode) {
+			spin_unlock(&cd->spinlock);
+			kfree(atten_new);
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				"%s: %s=%pK %s=%d\n",
+				__func__, "already subscribed attention",
+				dev, "mode", mode);
+
+			return 0;
+		}
+
+	atten_new->id = id;
+	atten_new->dev = dev;
+	atten_new->mode = mode;
+	atten_new->func = func;
+
+	list_add(&atten_new->node, &cd->atten_list[type]);
+	spin_unlock(&cd->spinlock);
+
+	return 0;
+}
+
+int _cyttsp5_unsubscribe_attention(struct device *dev,
+		enum cyttsp5_atten_type type, char *id,
+		int (*func)(struct device *), int mode)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct atten_node *atten, *atten_n;
+
+	spin_lock(&cd->spinlock);
+	list_for_each_entry_safe(atten, atten_n, &cd->atten_list[type], node)
+		if (atten->id == id && atten->mode == mode) {
+			list_del(&atten->node);
+			spin_unlock(&cd->spinlock);
+
+			parade_debug(cd->dev, DEBUG_LEVEL_2,
+				"%s: %s=%pK %s=%d\n",
+				__func__, "unsub for atten->dev",
+				atten->dev, "atten->mode", atten->mode);
+			kfree(atten);
+			return 0;
+		}
+
+	spin_unlock(&cd->spinlock);
+
+	return -ENODEV;
+}
+
+static int _cyttsp5_request_exclusive(struct device *dev,
+		int timeout_ms)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	return request_exclusive(cd, (void *)dev, timeout_ms);
+}
+
+static int _cyttsp5_release_exclusive(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	return release_exclusive(cd, (void *)dev);
+}
+
+static int cyttsp5_reset(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	/* reset hardware */
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: reset hw...\n", __func__);
+	rc = cyttsp5_hw_reset(cd);
+	if (rc < 0)
+		dev_err(cd->dev, "%s: %s dev='%s' r=%d\n", __func__,
+			"Fail hw reset", dev_name(cd->dev), rc);
+	return rc;
+}
+
+static int cyttsp5_reset_and_wait(struct cyttsp5_core_data *cd)
+{
+	int rc;
+	int t;
+
+	mutex_lock(&cd->system_lock);
+	cd->hid_reset_cmd_state = 1;
+	mutex_unlock(&cd->system_lock);
+
+	rc = cyttsp5_reset(cd);
+	if (rc < 0)
+		goto error;
+
+	t = wait_event_timeout(cd->wait_q, (cd->hid_reset_cmd_state == 0),
+			msecs_to_jiffies(CY_HID_RESET_TIMEOUT));
+	if (IS_TMO(t)) {
+		dev_err(cd->dev, "%s: reset timed out\n", __func__);
+		rc = -ETIME;
+		goto error;
+	}
+
+	goto exit;
+
+error:
+	mutex_lock(&cd->system_lock);
+	cd->hid_reset_cmd_state = 0;
+	mutex_unlock(&cd->system_lock);
+exit:
+	return rc;
+}
+
+/*
+ * Returns an error if refused or if a timeout occurs (the core uses a
+ * fixed timeout period); blocks until the ISR occurs.
+ */
+static int _cyttsp5_request_reset(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	int rc;
+
+	mutex_lock(&cd->system_lock);
+	cd->hid_reset_cmd_state = 1;
+	mutex_unlock(&cd->system_lock);
+
+	rc = cyttsp5_reset(cd);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: Error on h/w reset r=%d\n",
+			__func__, rc);
+		mutex_lock(&cd->system_lock);
+		cd->hid_reset_cmd_state = 0;
+		mutex_unlock(&cd->system_lock);
+	}
+
+	return rc;
+}
+
+/*
+ * Returns an error if refused; if no error, the restart has completed
+ * and the system is in normal operating mode.
+ */
+static int _cyttsp5_request_restart(struct device *dev, bool wait)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	cyttsp5_queue_startup(cd);
+
+	if (wait)
+		wait_event(cd->wait_q, cd->startup_state == STARTUP_NONE);
+
+	return 0;
+}
+
+/*
+ * returns NULL if sysinfo has not been acquired from the device yet
+ */
+struct cyttsp5_sysinfo *_cyttsp5_request_sysinfo(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (cd->sysinfo.ready)
+		return &cd->sysinfo;
+
+	return NULL;
+}
+
+static struct cyttsp5_loader_platform_data *_cyttsp5_request_loader_pdata(
+		struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	return cd->pdata->loader_pdata;
+}
+
+static int _cyttsp5_request_start_wd(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	cyttsp5_start_wd_timer(cd);
+	return 0;
+}
+
+static int _cyttsp5_request_stop_wd(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	cyttsp5_stop_wd_timer(cd);
+	return 0;
+}
+
+static int cyttsp5_core_wake_device_from_deep_sleep_(
+		struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = cyttsp5_hid_cmd_set_power_(cd, HID_POWER_ON);
+	if (rc)
+		rc = -EAGAIN;
+
+	/* Prevent failure on sequential wake/sleep requests from OS */
+	msleep(20);
+
+	return rc;
+}
+
+static int cyttsp5_core_wake_device_(struct cyttsp5_core_data *cd)
+{
+	if (!IS_DEEP_SLEEP_CONFIGURED(cd->easy_wakeup_gesture)) {
+		#ifdef CY_GES_WAKEUP
+		return cyttsp5_core_wake_device_from_easy_wakeup_(cd);
+		#endif
+	}
+
+	return cyttsp5_core_wake_device_from_deep_sleep_(cd);
+}
+
+static int cyttsp5_restore_parameters_(struct cyttsp5_core_data *cd)
+{
+	struct param_node *param;
+	int rc = 0;
+
+	if (!(cd->cpdata->flags & CY_CORE_FLAG_RESTORE_PARAMETERS))
+		goto exit;
+
+	spin_lock(&cd->spinlock);
+	list_for_each_entry(param, &cd->param_list, node) {
+		spin_unlock(&cd->spinlock);
+		parade_debug(cd->dev, DEBUG_LEVEL_2,
+			"%s: Parameter id:%d value:%d\n",
+			__func__, param->id, param->value);
+		rc = cyttsp5_hid_output_set_param_(cd, param->id,
+				param->value, param->size);
+		if (rc)
+			goto exit;
+		spin_lock(&cd->spinlock);
+	}
+	spin_unlock(&cd->spinlock);
+exit:
+	return rc;
+}
+
+static int _fast_startup(struct cyttsp5_core_data *cd)
+{
+	int retry = CY_CORE_STARTUP_RETRY_COUNT;
+	int rc;
+
+reset:
+	if (retry != CY_CORE_STARTUP_RETRY_COUNT)
+		parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: Retry %d\n",
+			__func__, CY_CORE_STARTUP_RETRY_COUNT - retry);
+
+	rc = cyttsp5_get_hid_descriptor_(cd, &cd->hid_desc);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: Error on getting HID descriptor r=%d\n",
+			__func__, rc);
+		if (retry--)
+			goto reset;
+		goto exit;
+	}
+	cd->mode = cyttsp5_get_mode(cd, &cd->hid_desc);
+
+	if (cd->mode == CY_MODE_BOOTLOADER) {
+		dev_info(cd->dev, "%s: Bootloader mode\n", __func__);
+		rc = cyttsp5_hid_output_bl_launch_app_(cd);
+		if (rc < 0) {
+			dev_err(cd->dev, "%s: Error on launch app r=%d\n",
+				__func__, rc);
+			if (retry--)
+				goto reset;
+			goto exit;
+		}
+		rc = cyttsp5_get_hid_descriptor_(cd, &cd->hid_desc);
+		if (rc < 0) {
+			dev_err(cd->dev,
+				"%s: Error on getting HID descriptor r=%d\n",
+				__func__, rc);
+			if (retry--)
+				goto reset;
+			goto exit;
+		}
+		cd->mode = cyttsp5_get_mode(cd, &cd->hid_desc);
+		if (cd->mode == CY_MODE_BOOTLOADER) {
+			if (retry--)
+				goto reset;
+			goto exit;
+		}
+	}
+
+	rc = cyttsp5_restore_parameters_(cd);
+	if (rc)
+		dev_err(cd->dev, "%s: failed to restore parameters rc=%d\n",
+			__func__, rc);
+
+exit:
+	return rc;
+}
+
+static int cyttsp5_core_poweron_device_(struct cyttsp5_core_data *cd)
+{
+	struct device *dev = cd->dev;
+	int rc;
+
+	rc = cd->cpdata->power(cd->cpdata, 1, dev, NULL);
+	if (rc < 0) {
+		dev_err(dev, "%s: HW Power up fails r=%d\n", __func__, rc);
+		goto exit;
+	}
+
+	if (!cd->irq_enabled) {
+		cd->irq_enabled = true;
+		enable_irq(cd->irq);
+	}
+
+	rc = _fast_startup(cd);
+exit:
+	return rc;
+}
+
+static int cyttsp5_core_wake_(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	mutex_lock(&cd->system_lock);
+	if (cd->sleep_state == SS_SLEEP_ON)
+		cd->sleep_state = SS_WAKING;
+	else {
+		mutex_unlock(&cd->system_lock);
+		return 1;
+	}
+	mutex_unlock(&cd->system_lock);
+
+	if (cd->cpdata->flags & CY_CORE_FLAG_POWEROFF_ON_SLEEP)
+		rc = cyttsp5_core_poweron_device_(cd);
+	else
+		rc = cyttsp5_core_wake_device_(cd);
+
+	mutex_lock(&cd->system_lock);
+	cd->sleep_state = SS_SLEEP_OFF;
+	mutex_unlock(&cd->system_lock);
+
+	cyttsp5_start_wd_timer(cd);
+	return rc;
+}
+
+static int cyttsp5_core_wake(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+			__func__, cd->exclusive_dev, cd->dev);
+		return rc;
+	}
+
+	rc = cyttsp5_core_wake_(cd);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+	else
+		parade_debug(cd->dev, DEBUG_LEVEL_2,
+			"%s: pass release exclusive\n", __func__);
+
+	return rc;
+}
+
+static int cyttsp5_get_ic_crc_(struct cyttsp5_core_data *cd, u8 ebid)
+{
+	struct cyttsp5_sysinfo *si = &cd->sysinfo;
+	int rc;
+	u8 status;
+	u16 calculated_crc = 0;
+	u16 stored_crc = 0;
+
+	rc = cyttsp5_hid_output_suspend_scanning_(cd);
+	if (rc)
+		goto error;
+
+	rc = cyttsp5_hid_output_verify_config_block_crc_(cd, ebid, &status,
+			&calculated_crc, &stored_crc);
+	if (rc)
+		goto exit;
+
+	if (status) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	si->ttconfig.crc = stored_crc;
+
+exit:
+	cyttsp5_hid_output_resume_scanning_(cd);
+error:
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: CRC: ebid:%d, crc:0x%04X\n",
+		__func__, ebid, si->ttconfig.crc);
+	return rc;
+}
+
+static int cyttsp5_check_and_deassert_int(struct cyttsp5_core_data *cd)
+{
+	u16 size;
+	u8 buf[2];
+	u8 *p;
+	u8 retry = 3;
+	int rc;
+
+	do {
+		rc = cyttsp5_adap_read_default(cd, buf, 2);
+		if (rc < 0)
+			return rc;
+		size = get_unaligned_le16(&buf[0]);
+
+		if (size == 2 || size == 0 || size >= CY_PIP_1P7_EMPTY_BUF)
+			return 0;
+
+		p = kzalloc(size, GFP_KERNEL);
+		if (!p)
+			return -ENOMEM;
+
+		rc = cyttsp5_adap_read_default(cd, p, size);
+		kfree(p);
+		if (rc < 0)
+			return rc;
+	} while (retry--);
+
+	return -EINVAL;
+}
+
+static int cyttsp5_startup_(struct cyttsp5_core_data *cd, bool reset)
+{
+	int retry = CY_CORE_STARTUP_RETRY_COUNT;
+	int rc;
+	bool detected = false;
+
+#ifdef TTHE_TUNER_SUPPORT
+	tthe_print(cd, NULL, 0, "enter startup");
+#endif
+
+	cyttsp5_stop_wd_timer(cd);
+
+reset:
+	if (retry != CY_CORE_STARTUP_RETRY_COUNT)
+		parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: Retry %d\n",
+			__func__, CY_CORE_STARTUP_RETRY_COUNT - retry);
+
+	rc = cyttsp5_check_and_deassert_int(cd);
+
+	if (reset || retry != CY_CORE_STARTUP_RETRY_COUNT) {
+		/* reset hardware */
+		rc = cyttsp5_reset_and_wait(cd);
+		if (rc < 0) {
+			dev_err(cd->dev, "%s: Error on h/w reset r=%d\n",
+					__func__, rc);
+			if (retry--)
+				goto reset;
+			goto exit;
+		}
+	}
+
+	rc = cyttsp5_get_hid_descriptor_(cd, &cd->hid_desc);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: Error on getting HID descriptor r=%d\n",
+			__func__, rc);
+		if (retry--)
+			goto reset;
+		goto exit;
+	}
+	cd->mode = cyttsp5_get_mode(cd, &cd->hid_desc);
+
+	detected = true;
+
+	/* Switch to bootloader mode to get Panel ID */
+	if (cd->mode == CY_MODE_OPERATIONAL) {
+		rc = cyttsp5_hid_output_start_bootloader_(cd);
+		if (rc < 0) {
+			dev_err(cd->dev, "%s: Error on start bootloader r=%d\n",
+				__func__, rc);
+			if (retry--)
+				goto reset;
+			goto exit;
+		}
+		dev_info(cd->dev, "%s: Bootloader mode\n", __func__);
+	}
+
+	cyttsp5_hid_output_bl_get_panel_id_(cd, &cd->panel_id);
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: Panel ID: 0x%02X\n",
+		__func__, cd->panel_id);
+
+	rc = cyttsp5_hid_output_bl_launch_app_(cd);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: Error on launch app r=%d\n",
+			__func__, rc);
+		if (retry--)
+			goto reset;
+		goto exit;
+	}
+
+	rc = cyttsp5_get_hid_descriptor_(cd, &cd->hid_desc);
+	if (rc < 0) {
+		dev_err(cd->dev,
+			"%s: Error on getting HID descriptor r=%d\n",
+			__func__, rc);
+		if (retry--)
+			goto reset;
+		goto exit;
+	}
+	cd->mode = cyttsp5_get_mode(cd, &cd->hid_desc);
+	if (cd->mode == CY_MODE_BOOTLOADER) {
+		if (retry--)
+			goto reset;
+		goto exit;
+	}
+
+	mutex_lock(&cd->system_lock);
+	/* Read descriptor lengths */
+	cd->hid_core.hid_report_desc_len =
+		le16_to_cpu(cd->hid_desc.report_desc_len);
+	cd->hid_core.hid_max_input_len =
+		le16_to_cpu(cd->hid_desc.max_input_len);
+	cd->hid_core.hid_max_output_len =
+		le16_to_cpu(cd->hid_desc.max_output_len);
+
+	cd->mode = cyttsp5_get_mode(cd, &cd->hid_desc);
+	if (cd->mode == CY_MODE_OPERATIONAL)
+		dev_dbg(cd->dev, "%s: Operational mode\n", __func__);
+	else if (cd->mode == CY_MODE_BOOTLOADER)
+		dev_dbg(cd->dev, "%s: Bootloader mode\n", __func__);
+	else if (cd->mode == CY_MODE_UNKNOWN) {
+		dev_err(cd->dev, "%s: Unknown mode\n", __func__);
+		rc = -ENODEV;
+		mutex_unlock(&cd->system_lock);
+		if (retry--)
+			goto reset;
+		goto exit;
+	}
+	mutex_unlock(&cd->system_lock);
+
+	dev_dbg(cd->dev, "%s: Reading report descriptor\n", __func__);
+	rc = cyttsp5_get_report_descriptor_(cd);
+	if (rc < 0) {
+		dev_err(cd->dev,
+			"%s: Error on getting report descriptor r=%d\n",
+			__func__, rc);
+		if (retry--)
+			goto reset;
+		goto exit;
+	}
+
+	if (!cd->features.easywake)
+		cd->easy_wakeup_gesture = CY_CORE_EWG_NONE;
+
+	rc = cyttsp5_hid_output_get_sysinfo_(cd);
+	if (rc) {
+		dev_err(cd->dev, "%s: Error on getting sysinfo r=%d\n",
+			__func__, rc);
+		if (retry--)
+			goto reset;
+		goto exit;
+	}
+
+	dev_info(cd->dev, "cyttsp5 Protocol Version: %d.%d\n",
+			cd->sysinfo.cydata.pip_ver_major,
+			cd->sysinfo.cydata.pip_ver_minor);
+
+	/* Read config version directly if PIP version < 1.2 */
+	if (!IS_PIP_VER_GE(&cd->sysinfo, 1, 2)) {
+		rc = cyttsp5_get_config_ver_(cd);
+		if (rc)
+			dev_err(cd->dev,
+				"%s: failed to read config version rc=%d\n",
+				__func__, rc);
+	}
+
+	rc = cyttsp5_get_ic_crc_(cd, CY_TCH_PARM_EBID);
+	if (rc)
+		dev_err(cd->dev, "%s: failed to crc data rc=%d\n",
+			__func__, rc);
+
+	rc = cyttsp5_restore_parameters_(cd);
+	if (rc)
+		dev_err(cd->dev, "%s: failed to restore parameters rc=%d\n",
+			__func__, rc);
+
+	/* attention startup */
+	call_atten_cb(cd, CY_ATTEN_STARTUP, 0);
+
+exit:
+	if (!rc)
+		cd->startup_retry_count = 0;
+
+	cyttsp5_start_wd_timer(cd);
+
+	if (!detected)
+		rc = -ENODEV;
+
+#ifdef TTHE_TUNER_SUPPORT
+	tthe_print(cd, NULL, 0, "exit startup");
+#endif
+
+	return rc;
+}
+
+static int cyttsp5_startup(struct cyttsp5_core_data *cd, bool reset)
+{
+	int rc;
+
+	mutex_lock(&cd->system_lock);
+	cd->startup_state = STARTUP_RUNNING;
+	mutex_unlock(&cd->system_lock);
+
+	rc = request_exclusive(cd, cd->dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(cd->dev, "%s: fail get exclusive ex=%pK own=%pK\n",
+				__func__, cd->exclusive_dev, cd->dev);
+		goto exit;
+	}
+
+	rc = cyttsp5_startup_(cd, reset);
+
+	if (release_exclusive(cd, cd->dev) < 0)
+		/* Don't return fail code, mode is already changed. */
+		dev_err(cd->dev, "%s: fail to release exclusive\n", __func__);
+	else
+		parade_debug(cd->dev, DEBUG_LEVEL_2,
+			"%s: pass release exclusive\n", __func__);
+
+exit:
+	mutex_lock(&cd->system_lock);
+	cd->startup_state = STARTUP_NONE;
+	mutex_unlock(&cd->system_lock);
+
+	/* Wake the waiters for end of startup */
+	wake_up(&cd->wait_q);
+
+	return rc;
+}
+
+static void cyttsp5_startup_work_function(struct work_struct *work)
+{
+	struct cyttsp5_core_data *cd = container_of(work,
+		struct cyttsp5_core_data, startup_work);
+	int rc;
+
+	rc = cyttsp5_startup(cd, true);
+	if (rc < 0)
+		dev_err(cd->dev, "%s: Fail queued startup r=%d\n",
+			__func__, rc);
+}
+
+/*
+ * The CONFIG_PM_RUNTIME option was removed in kernel 3.19.0; runtime PM
+ * callbacks are built whenever CONFIG_PM is set.
+ */
+
+static int cyttsp5_core_rt_suspend(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	int rc;
+
+	rc = cyttsp5_core_sleep(cd);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on sleep\n", __func__);
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+static int cyttsp5_core_rt_resume(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	int rc;
+
+	rc = cyttsp5_core_wake(cd);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on wake\n", __func__);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_PM_SLEEP)
+static int cyttsp5_core_suspend(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	cyttsp5_core_sleep(cd);
+
+	if (IS_DEEP_SLEEP_CONFIGURED(cd->easy_wakeup_gesture))
+		return 0;
+
+	/*
+	 * This will not prevent resume; it is required to keep interrupts
+	 * from arriving before the I2C bus is awake.
+	 */
+	disable_irq(cd->irq);
+	cd->irq_disabled = 1;
+
+	if (device_may_wakeup(dev)) {
+		parade_debug(dev, DEBUG_LEVEL_2, "%s Device MAY wakeup\n",
+			__func__);
+		if (!enable_irq_wake(cd->irq))
+			cd->irq_wake = 1;
+	} else
+		parade_debug(dev, DEBUG_LEVEL_1, "%s Device MAY NOT wakeup\n",
+			__func__);
+
+	return 0;
+}
+
+static int cyttsp5_core_resume(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	if (IS_DEEP_SLEEP_CONFIGURED(cd->easy_wakeup_gesture))
+		goto exit;
+
+	/*
+	 * I2C bus PM does not call suspend if the device is runtime
+	 * suspended; this flag covers that case.
+	 */
+	if (cd->irq_disabled) {
+		enable_irq(cd->irq);
+		cd->irq_disabled = 0;
+	}
+
+	if (device_may_wakeup(dev)) {
+		parade_debug(dev, DEBUG_LEVEL_2, "%s Device MAY wakeup\n",
+			__func__);
+		if (cd->irq_wake) {
+			disable_irq_wake(cd->irq);
+			cd->irq_wake = 0;
+		}
+	} else
+		parade_debug(dev, DEBUG_LEVEL_1, "%s Device MAY NOT wakeup\n",
+			__func__);
+
+exit:
+	cyttsp5_core_wake(cd);
+
+	return 0;
+}
+#endif
+
+#ifdef NEED_SUSPEND_NOTIFIER
+static int cyttsp5_pm_notifier(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	struct cyttsp5_core_data *cd = container_of(nb,
+			struct cyttsp5_core_data, pm_notifier);
+
+	if (action == PM_SUSPEND_PREPARE) {
+		parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: Suspend prepare\n",
+			__func__);
+
+		/*
+		 * If not runtime PM suspended, either call runtime
+		 * PM suspend callback or wait until it finishes
+		 */
+		if (!pm_runtime_suspended(cd->dev))
+			pm_runtime_suspend(cd->dev);
+
+		(void) cyttsp5_core_suspend(cd->dev);
+	}
+
+	return NOTIFY_DONE;
+}
+#endif
+
+const struct dev_pm_ops cyttsp5_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(cyttsp5_core_suspend, cyttsp5_core_resume)
+	SET_RUNTIME_PM_OPS(cyttsp5_core_rt_suspend, cyttsp5_core_rt_resume,
+			NULL)
+};
+EXPORT_SYMBOL_GPL(cyttsp5_pm_ops);
+
+/*
+ * Show Firmware version via sysfs
+ */
+static ssize_t cyttsp5_ic_ver_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_cydata *cydata = &cd->sysinfo.cydata;
+
+	return snprintf(buf, PAGE_SIZE,
+		"%s: 0x%02X\n"
+		"%s: 0x%02X\n"
+		"%s: 0x%08X\n"
+		"%s: 0x%04X\n"
+		"%s: 0x%02X\n"
+		"%s: 0x%02X\n"
+		"%s: 0x%02X\n"
+		"%s: 0x%02X\n",
+		"Firmware Major Version", cydata->fw_ver_major,
+		"Firmware Minor Version", cydata->fw_ver_minor,
+		"Revision Control Number", cydata->revctrl,
+		"Firmware Configuration Version", cydata->fw_ver_conf,
+		"Bootloader Major Version", cydata->bl_ver_major,
+		"Bootloader Minor Version", cydata->bl_ver_minor,
+		"Protocol Major Version", cydata->pip_ver_major,
+		"Protocol Minor Version", cydata->pip_ver_minor);
+}
+
+/*
+ * Show Driver version via sysfs
+ */
+static ssize_t cyttsp5_drv_ver_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, CY_MAX_PRBUF_SIZE,
+		"Driver: %s\nVersion: %s\nDate: %s\n",
+		cy_driver_core_name, cy_driver_core_version,
+		cy_driver_core_date);
+}
+
+/*
+ * HW reset via sysfs
+ */
+static ssize_t cyttsp5_hw_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	int rc;
+
+	rc = cyttsp5_startup(cd, true);
+	if (rc < 0)
+		dev_err(dev, "%s: HW reset failed r=%d\n", __func__, rc);
+
+	return size;
+}
+
+/*
+ * Show IRQ status via sysfs
+ */
+static ssize_t cyttsp5_hw_irq_stat_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	int retval;
+
+	if (cd->cpdata->irq_stat) {
+		retval = cd->cpdata->irq_stat(cd->cpdata, dev);
+		switch (retval) {
+		case 0:
+			return snprintf(buf, CY_MAX_PRBUF_SIZE,
+				"Interrupt line is LOW.\n");
+		case 1:
+			return snprintf(buf, CY_MAX_PRBUF_SIZE,
+				"Interrupt line is HIGH.\n");
+		default:
+			return snprintf(buf, CY_MAX_PRBUF_SIZE,
+				"Function irq_stat() returned %d.\n", retval);
+		}
+	}
+
+	return snprintf(buf, CY_MAX_PRBUF_SIZE,
+		"Function irq_stat() undefined.\n");
+}
+
+/*
+ * Show IRQ enable/disable status via sysfs
+ */
+static ssize_t cyttsp5_drv_irq_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	mutex_lock(&cd->system_lock);
+	if (cd->irq_enabled)
+		ret = snprintf(buf, CY_MAX_PRBUF_SIZE,
+			"Driver interrupt is ENABLED\n");
+	else
+		ret = snprintf(buf, CY_MAX_PRBUF_SIZE,
+			"Driver interrupt is DISABLED\n");
+	mutex_unlock(&cd->system_lock);
+
+	return ret;
+}
+
+/*
+ * Enable/disable IRQ via sysfs
+ */
+static ssize_t cyttsp5_drv_irq_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	unsigned long value;
+	int retval = 0;
+
+	retval = kstrtoul(buf, 10, &value);
+	if (retval < 0) {
+		dev_err(dev, "%s: Invalid value\n", __func__);
+		goto cyttsp5_drv_irq_store_error_exit;
+	}
+
+	mutex_lock(&cd->system_lock);
+	switch (value) {
+	case 0:
+		if (cd->irq_enabled) {
+			cd->irq_enabled = false;
+			/* Disable IRQ */
+			disable_irq_nosync(cd->irq);
+			dev_info(dev, "%s: Driver IRQ now disabled\n",
+				__func__);
+		} else
+			dev_info(dev, "%s: Driver IRQ already disabled\n",
+				__func__);
+		break;
+
+	case 1:
+		if (!cd->irq_enabled) {
+			cd->irq_enabled = true;
+			/* Enable IRQ */
+			enable_irq(cd->irq);
+			dev_info(dev, "%s: Driver IRQ now enabled\n",
+				__func__);
+		} else
+			dev_info(dev, "%s: Driver IRQ already enabled\n",
+				__func__);
+		break;
+
+	default:
+		dev_err(dev, "%s: Invalid value\n", __func__);
+	}
+	mutex_unlock(&cd->system_lock);
+
+cyttsp5_drv_irq_store_error_exit:
+
+	return size;
+}
+
+/*
+ * Get user input from sysfs and parse it;
+ * returns the number of parsed values, or a negative error code
+ */
+
+#define CY_MAX_CONFIG_BYTES_DEC    256
+#define CYTTSP5_INPUT_ELEM_SZ_DEC 10
+
+static int cyttsp5_ic_parse_input_dec(struct device *dev, const char *buf,
+		size_t buf_size, u32 *ic_buf, size_t ic_buf_size)
+{
+	const char *pbuf = buf;
+	unsigned long value;
+	char scan_buf[CYTTSP5_INPUT_ELEM_SZ_DEC];
+	u32 i = 0;
+	u32 j;
+	int last = 0;
+	int ret;
+
+	parade_debug(dev, DEBUG_LEVEL_1,
+		"%s: pbuf=%pK buf=%pK size=%zu %s=%zu buf=%s\n",
+		__func__, pbuf, buf, buf_size, "scan buf size",
+		(size_t)CYTTSP5_INPUT_ELEM_SZ_DEC, buf);
+
+	while (pbuf <= (buf + buf_size)) {
+		if (i >= CY_MAX_CONFIG_BYTES_DEC) {
+			dev_err(dev, "%s: %s size=%d max=%d\n", __func__,
+					"Max cmd size exceeded", i,
+					CY_MAX_CONFIG_BYTES_DEC);
+			return -EINVAL;
+		}
+		if (i >= ic_buf_size) {
+			dev_err(dev, "%s: %s size=%d buf_size=%zu\n", __func__,
+					"Buffer size exceeded", i, ic_buf_size);
+			return -EINVAL;
+		}
+		while (((*pbuf == ' ') || (*pbuf == ','))
+				&& (pbuf < (buf + buf_size))) {
+			last = *pbuf;
+			pbuf++;
+		}
+
+		if (pbuf >= (buf + buf_size))
+			break;
+
+		memset(scan_buf, 0, CYTTSP5_INPUT_ELEM_SZ_DEC);
+		if ((last == ',') && (*pbuf == ',')) {
+			dev_err(dev, "%s: %s \",,\" not allowed.\n", __func__,
+					"Invalid data format.");
+			return -EINVAL;
+		}
+		for (j = 0; j < (CYTTSP5_INPUT_ELEM_SZ_DEC - 1)
+				&& (pbuf < (buf + buf_size))
+				&& (*pbuf != ' ')
+				&& (*pbuf != ','); j++) {
+			last = *pbuf;
+			scan_buf[j] = *pbuf++;
+		}
+		ret = kstrtoul(scan_buf, 10, &value);
+		if (ret < 0) {
+			dev_err(dev, "%s: Invalid data format.\n", __func__);
+			return ret;
+		}
+
+		ic_buf[i] = value;
+		i++;
+	}
+
+	return i;
+}
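+
+/*
+ * Example (illustrative): writing "2, 115" to a sysfs node parsed by the
+ * function above yields ic_buf[0] = 2, ic_buf[1] = 115 and a return value
+ * of 2; consecutive commas such as "2,,115" are rejected with -EINVAL.
+ */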
+
+/*
+ * Debugging options via sysfs
+ */
+static ssize_t cyttsp5_drv_debug_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	unsigned long value;
+	int rc;
+	u8 return_data[8];
+	static u8 wd_disabled;
+	u32 input_data[2];
+	int length;
+
+	/* parse at most two input values */
+	length = cyttsp5_ic_parse_input_dec(dev, buf, size,
+			input_data, ARRAY_SIZE(input_data));
+	if (length <= 0) {
+		dev_err(dev, "%s: %s failed\n", __func__,
+				"cyttsp5_ic_parse_input_dec");
+		goto cyttsp5_drv_debug_store_exit;
+	}
+	value = input_data[0];
+
+	/* Start watchdog timer command */
+	if (value == CY_DBG_HID_START_WD) {
+		dev_info(dev, "%s: start watchdog (cd=%pK)\n", __func__, cd);
+		wd_disabled = 0;
+		cyttsp5_start_wd_timer(cd);
+		goto cyttsp5_drv_debug_store_exit;
+	}
+
+	/* Stop watchdog timer temporarily */
+	cyttsp5_stop_wd_timer(cd);
+
+	if (value == CY_DBG_HID_STOP_WD) {
+		dev_info(dev, "%s: stop watchdog (cd=%pK)\n", __func__, cd);
+		wd_disabled = 1;
+		goto cyttsp5_drv_debug_store_exit;
+	}
+
+	switch (value) {
+	case CY_DBG_SUSPEND:
+		dev_info(dev, "%s: SUSPEND (cd=%pK)\n", __func__, cd);
+		rc = cyttsp5_core_sleep(cd);
+		if (rc)
+			dev_err(dev, "%s: Suspend failed rc=%d\n",
+				__func__, rc);
+		else
+			dev_info(dev, "%s: Suspend succeeded\n", __func__);
+		break;
+
+	case CY_DBG_RESUME:
+		dev_info(dev, "%s: RESUME (cd=%pK)\n", __func__, cd);
+		rc = cyttsp5_core_wake(cd);
+		if (rc)
+			dev_err(dev, "%s: Resume failed rc=%d\n",
+				__func__, rc);
+		else
+			dev_info(dev, "%s: Resume succeeded\n", __func__);
+		break;
+	case CY_DBG_SOFT_RESET:
+		dev_info(dev, "%s: SOFT RESET (cd=%pK)\n", __func__, cd);
+		rc = cyttsp5_hw_soft_reset(cd);
+		break;
+	case CY_DBG_RESET:
+		dev_info(dev, "%s: HARD RESET (cd=%pK)\n", __func__, cd);
+		rc = cyttsp5_hw_hard_reset(cd);
+		break;
+	case CY_DBG_HID_RESET:
+		dev_info(dev, "%s: hid_reset (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_cmd_reset(cd);
+		break;
+	case CY_DBG_HID_SET_POWER_ON:
+		dev_info(dev, "%s: hid_set_power_on (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_cmd_set_power(cd, HID_POWER_ON);
+		wd_disabled = 0;
+		break;
+	case CY_DBG_HID_SET_POWER_SLEEP:
+		dev_info(dev, "%s: hid_set_power_off (cd=%pK)\n", __func__, cd);
+		wd_disabled = 1;
+		cyttsp5_hid_cmd_set_power(cd, HID_POWER_SLEEP);
+		break;
+	case CY_DBG_HID_NULL:
+		dev_info(dev, "%s: hid_null (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_output_null(cd);
+		break;
+	case CY_DBG_HID_ENTER_BL:
+		dev_info(dev, "%s: start_bootloader (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_output_start_bootloader(cd);
+		break;
+	case CY_DBG_HID_SYSINFO:
+		dev_info(dev, "%s: get_sysinfo (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_output_get_sysinfo(cd);
+		break;
+	case CY_DBG_HID_SUSPEND_SCAN:
+		dev_info(dev, "%s: suspend_scanning (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_output_suspend_scanning(cd);
+		break;
+	case CY_DBG_HID_RESUME_SCAN:
+		dev_info(dev, "%s: resume_scanning (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_output_resume_scanning(cd);
+		break;
+	case HID_OUTPUT_BL_VERIFY_APP_INTEGRITY:
+		dev_info(dev, "%s: verify app integ (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_output_bl_verify_app_integrity(cd, &return_data[0]);
+		break;
+	case HID_OUTPUT_BL_GET_INFO:
+		dev_info(dev, "%s: bl get info (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_output_bl_get_information(cd, return_data);
+		break;
+	case HID_OUTPUT_BL_PROGRAM_AND_VERIFY:
+		dev_info(dev, "%s: program & verify (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_output_bl_program_and_verify(cd, 0, NULL);
+		break;
+	case HID_OUTPUT_BL_LAUNCH_APP:
+		dev_info(dev, "%s: launch app (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_output_bl_launch_app(cd);
+		break;
+	case HID_OUTPUT_BL_INITIATE_BL:
+		dev_info(dev, "%s: initiate bl (cd=%pK)\n", __func__, cd);
+		cyttsp5_hid_output_bl_initiate_bl(cd, 0, NULL, 0, NULL);
+		break;
+#ifdef TTHE_TUNER_SUPPORT
+	case CY_TTHE_TUNER_EXIT:
+		cd->tthe_exit = 1;
+		wake_up(&cd->wait_q);
+		kfree(cd->tthe_buf);
+		cd->tthe_buf = NULL;
+		cd->tthe_exit = 0;
+		break;
+	case CY_TTHE_BUF_CLEAN:
+		if (cd->tthe_buf)
+			memset(cd->tthe_buf, 0, CY_MAX_PRBUF_SIZE);
+		else
+			dev_info(dev, "%s: tthe_buf does not exist\n",
+				__func__);
+		break;
+#endif
+	case CY_DBG_REPORT_LEVEL:
+		mutex_lock(&cd->system_lock);
+		cd->debug_level = input_data[1];
+		dev_info(dev, "%s: Set debug_level: %d\n",
+			__func__, cd->debug_level);
+		mutex_unlock(&cd->system_lock);
+		break;
+	case CY_DBG_WATCHDOG_INTERVAL:
+		mutex_lock(&cd->system_lock);
+		if (input_data[1] > 0)
+			cd->watchdog_interval = input_data[1];
+		dev_info(dev, "%s: Set watchdog_interval: %d\n",
+			__func__, cd->watchdog_interval);
+		mutex_unlock(&cd->system_lock);
+		break;
+	case CY_DBG_SHOW_TIMESTAMP:
+		mutex_lock(&cd->system_lock);
+		cd->show_timestamp = input_data[1];
+		dev_info(dev, "%s: Set show_timestamp: %d\n",
+			__func__, cd->show_timestamp);
+		mutex_unlock(&cd->system_lock);
+		break;
+
+	default:
+		dev_err(dev, "%s: Invalid value\n", __func__);
+	}
+
+	/* Enable watchdog timer */
+	if (!wd_disabled)
+		cyttsp5_start_wd_timer(cd);
+
+cyttsp5_drv_debug_store_exit:
+	return size;
+}
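+
+/*
+ * Usage sketch (illustrative): the store function above takes one or two
+ * decimal values, a command code followed by an optional argument; the
+ * command codes are defined in cyttsp5_regs.h. For example, assuming
+ * (hypothetically) that CY_DBG_REPORT_LEVEL expands to 28,
+ *
+ *	echo "28 2" > /sys/bus/i2c/devices/.../drv_debug
+ *
+ * would set the runtime debug level to 2.
+ */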
+
+/*
+ * Show deep sleep status via sysfs
+ */
+static ssize_t cyttsp5_sleep_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	mutex_lock(&cd->system_lock);
+	if (cd->sleep_state == SS_SLEEP_ON)
+		ret = snprintf(buf, CY_MAX_PRBUF_SIZE, "off\n");
+	else
+		ret = snprintf(buf, CY_MAX_PRBUF_SIZE, "on\n");
+	mutex_unlock(&cd->system_lock);
+
+	return ret;
+}
+
+static ssize_t cyttsp5_easy_wakeup_gesture_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	mutex_lock(&cd->system_lock);
+	ret = snprintf(buf, CY_MAX_PRBUF_SIZE, "0x%02X\n",
+			cd->easy_wakeup_gesture);
+	mutex_unlock(&cd->system_lock);
+	return ret;
+}
+
+static ssize_t cyttsp5_easy_wakeup_gesture_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	unsigned long value;
+	int ret;
+
+	if (!cd->features.easywake)
+		return -EINVAL;
+
+	ret = kstrtoul(buf, 10, &value);
+	if (ret < 0)
+		return ret;
+
+	if (value > 0xFF)
+		return -EINVAL;
+
+	pm_runtime_get_sync(dev);
+
+	mutex_lock(&cd->system_lock);
+	if (cd->sysinfo.ready && IS_PIP_VER_GE(&cd->sysinfo, 1, 2))
+		cd->easy_wakeup_gesture = (u8)value;
+	else
+		ret = -ENODEV;
+	mutex_unlock(&cd->system_lock);
+
+	pm_runtime_put(dev);
+
+	if (ret)
+		return ret;
+
+	return size;
+}
+
+#ifdef EASYWAKE_TSG6
+/*
+ * Show easywake gesture id via sysfs
+ */
+static ssize_t cyttsp5_easy_wakeup_gesture_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	mutex_lock(&cd->system_lock);
+	ret = snprintf(buf, CY_MAX_PRBUF_SIZE, "0x%02X\n", cd->gesture_id);
+	mutex_unlock(&cd->system_lock);
+	return ret;
+}
+
+/*
+ * Show easywake gesture data via sysfs
+ * Format:
+ * x1(LSB), x1(MSB), y1(LSB), y1(MSB), x2(LSB), x2(MSB), y2(LSB), y2(MSB), ...
+ */
+static ssize_t cyttsp5_easy_wakeup_gesture_data_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	ssize_t ret = 0;
+	int i;
+
+	mutex_lock(&cd->system_lock);
+
+	for (i = 0; i < cd->gesture_data_length; i++)
+		ret += snprintf(buf + ret, CY_MAX_PRBUF_SIZE - ret,
+				"0x%02X\n", cd->gesture_data[i]);
+
+	ret += snprintf(buf + ret, CY_MAX_PRBUF_SIZE - ret,
+			"(%d bytes)\n", cd->gesture_data_length);
+
+	mutex_unlock(&cd->system_lock);
+	return ret;
+}
+#endif
+
+/* Show Panel ID via sysfs */
+static ssize_t cyttsp5_panel_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	ret = snprintf(buf, CY_MAX_PRBUF_SIZE, "0x%02X\n",
+			cd->panel_id);
+	return ret;
+}
+
+/* Show platform data via sysfs */
+static ssize_t cyttsp5_platform_data_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_platform_data *pdata = dev_get_platdata(dev);
+	ssize_t ret;
+
+	ret = snprintf(buf, PAGE_SIZE,
+		"%s: %d\n"
+		"%s: %d\n"
+		"%s: %d\n"
+		"%s: %d\n"
+		"%s: %d\n"
+		"%s: %d\n"
+		"%s: %d\n"
+		"%s: %d\n"
+		"%s: %d\n"
+		"%s: %d\n"
+		"%s: %d\n"
+		"%s: %d\n",
+		"Interrupt GPIO", pdata->core_pdata->irq_gpio,
+		"Reset GPIO", pdata->core_pdata->rst_gpio,
+		"Level trigger delay (us)", pdata->core_pdata->level_irq_udelay,
+		"HID descriptor register", pdata->core_pdata->hid_desc_register,
+		"Vendor ID", pdata->core_pdata->vendor_id,
+		"Product ID", pdata->core_pdata->product_id,
+		"Easy wakeup gesture", pdata->core_pdata->easy_wakeup_gesture,
+		"Vkeys x", pdata->mt_pdata->vkeys_x,
+		"Vkeys y", pdata->mt_pdata->vkeys_y,
+		"Core data flags", pdata->core_pdata->flags,
+		"MT data flags", pdata->mt_pdata->flags,
+		"Loader data flags", pdata->loader_pdata->flags);
+	return ret;
+}
+
+static struct device_attribute attributes[] = {
+	__ATTR(ic_ver, 0444, cyttsp5_ic_ver_show, NULL),
+	__ATTR(drv_ver, 0444, cyttsp5_drv_ver_show, NULL),
+	__ATTR(hw_reset, 0200, NULL, cyttsp5_hw_reset_store),
+	__ATTR(hw_irq_stat, 0400, cyttsp5_hw_irq_stat_show, NULL),
+	__ATTR(drv_irq, 0600, cyttsp5_drv_irq_show,
+		cyttsp5_drv_irq_store),
+	__ATTR(drv_debug, 0200, NULL, cyttsp5_drv_debug_store),
+	__ATTR(sleep_status, 0400, cyttsp5_sleep_status_show, NULL),
+	__ATTR(easy_wakeup_gesture, 0600,
+		cyttsp5_easy_wakeup_gesture_show,
+		cyttsp5_easy_wakeup_gesture_store),
+#ifdef EASYWAKE_TSG6
+	__ATTR(easy_wakeup_gesture_id, 0400,
+		cyttsp5_easy_wakeup_gesture_id_show, NULL),
+	__ATTR(easy_wakeup_gesture_data, 0400,
+		cyttsp5_easy_wakeup_gesture_data_show, NULL),
+#endif
+	__ATTR(panel_id, 0444, cyttsp5_panel_id_show, NULL),
+	__ATTR(platform_data, 0444, cyttsp5_platform_data_show, NULL),
+};
+
+static int add_sysfs_interfaces(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(attributes); i++)
+		if (device_create_file(dev, attributes + i))
+			goto undo;
+	return 0;
+undo:
+	for (i--; i >= 0; i--)
+		device_remove_file(dev, attributes + i);
+	dev_err(dev, "%s: failed to create sysfs interface\n", __func__);
+	return -ENODEV;
+}
+
+static void remove_sysfs_interfaces(struct device *dev)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(attributes); i++)
+		device_remove_file(dev, attributes + i);
+}
+
+/*
+ * ttdl_restart via sysfs
+ */
+static ssize_t cyttsp5_ttdl_restart_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int rc;
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (is_cyttsp5_probe_success) {
+		dev_info(dev,
+			"%s: previous cyttsp5_probe successful, doing nothing\n",
+			__func__);
+		return size;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(dev, "%s: I2C functionality not supported\n", __func__);
+		return -EIO;
+	}
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	rc = cyttsp5_devtree_create_and_get_pdata(dev);
+	if (rc < 0)
+		return rc;
+#endif
+
+	rc = cyttsp5_probe(cyttsp5_bus_ops_save, &client->dev, client->irq,
+			  512);
+
+	if (!rc) {
+		is_cyttsp5_probe_success = true;
+		dev_info(dev, "%s: restart successful\n", __func__);
+	} else {
+		is_cyttsp5_probe_success = false;
+		dev_err(dev, "%s: ttdl restart failed r=%d\n",
+			__func__, rc);
+	}
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	if (rc)
+		cyttsp5_devtree_clean_pdata(dev);
+#endif
+
+	return size;
+}
+static DEVICE_ATTR(ttdl_restart, 0200, NULL, cyttsp5_ttdl_restart_store);
+
+#ifdef TTHE_TUNER_SUPPORT
+static int tthe_debugfs_open(struct inode *inode, struct file *filp)
+{
+	struct cyttsp5_core_data *cd = inode->i_private;
+
+	filp->private_data = inode->i_private;
+
+	if (cd->tthe_buf)
+		return -EBUSY;
+
+	cd->tthe_buf = kzalloc(CY_MAX_PRBUF_SIZE, GFP_KERNEL);
+	if (!cd->tthe_buf)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int tthe_debugfs_close(struct inode *inode, struct file *filp)
+{
+	struct cyttsp5_core_data *cd = filp->private_data;
+
+	filp->private_data = NULL;
+
+	kfree(cd->tthe_buf);
+	cd->tthe_buf = NULL;
+
+	return 0;
+}
+
+static ssize_t tthe_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_core_data *cd = filp->private_data;
+	int size;
+	int ret;
+
+	wait_event_interruptible(cd->wait_q,
+			cd->tthe_buf_len != 0 || cd->tthe_exit);
+	mutex_lock(&cd->tthe_lock);
+	if (cd->tthe_exit) {
+		mutex_unlock(&cd->tthe_lock);
+		return 0;
+	}
+	if (count > cd->tthe_buf_len)
+		size = cd->tthe_buf_len;
+	else
+		size = count;
+	if (!size) {
+		mutex_unlock(&cd->tthe_lock);
+		return 0;
+	}
+
+	/* Copy only the requested size, not the whole buffer */
+	ret = copy_to_user(buf, cd->tthe_buf, size);
+	if (ret == size) {
+		/* Nothing was copied; drop the lock before bailing out */
+		mutex_unlock(&cd->tthe_lock);
+		return -EFAULT;
+	}
+	size -= ret;
+	cd->tthe_buf_len -= size;
+	mutex_unlock(&cd->tthe_lock);
+	*ppos += size;
+	return size;
+}
+
+static const struct file_operations tthe_debugfs_fops = {
+	.open = tthe_debugfs_open,
+	.release = tthe_debugfs_close,
+	.read = tthe_debugfs_read,
+};
+#endif
+
+static struct cyttsp5_core_nonhid_cmd _cyttsp5_core_nonhid_cmd = {
+	.start_bl = _cyttsp5_request_hid_output_start_bl,
+	.suspend_scanning = _cyttsp5_request_hid_output_suspend_scanning,
+	.resume_scanning = _cyttsp5_request_hid_output_resume_scanning,
+	.get_param = _cyttsp5_request_hid_output_get_param,
+	.set_param = _cyttsp5_request_hid_output_set_param,
+	.verify_config_block_crc =
+		_cyttsp5_request_hid_output_verify_config_block_crc,
+	.get_config_row_size = _cyttsp5_request_hid_output_get_config_row_size,
+	.get_data_structure = _cyttsp5_request_hid_output_get_data_structure,
+	.run_selftest = _cyttsp5_request_hid_output_run_selftest,
+	.get_selftest_result = _cyttsp5_request_hid_output_get_selftest_result,
+	.calibrate_idacs = _cyttsp5_request_hid_output_calibrate_idacs,
+	.initialize_baselines =
+		_cyttsp5_request_hid_output_initialize_baselines,
+	.exec_panel_scan = _cyttsp5_request_hid_output_exec_panel_scan,
+	.retrieve_panel_scan = _cyttsp5_request_hid_output_retrieve_panel_scan,
+	.write_conf_block = _cyttsp5_request_hid_output_write_conf_block,
+	.user_cmd = _cyttsp5_request_hid_output_user_cmd,
+	.get_bl_info = _cyttsp5_request_hid_output_bl_get_information,
+	.initiate_bl = _cyttsp5_request_hid_output_bl_initiate_bl,
+	.launch_app = _cyttsp5_request_hid_output_launch_app,
+	.prog_and_verify = _cyttsp5_request_hid_output_bl_program_and_verify,
+	.verify_app_integrity =
+		_cyttsp5_request_hid_output_bl_verify_app_integrity,
+	.get_panel_id = _cyttsp5_request_hid_output_bl_get_panel_id,
+};
+
+static struct cyttsp5_core_commands _cyttsp5_core_commands = {
+	.subscribe_attention = _cyttsp5_subscribe_attention,
+	.unsubscribe_attention = _cyttsp5_unsubscribe_attention,
+	.request_exclusive = _cyttsp5_request_exclusive,
+	.release_exclusive = _cyttsp5_release_exclusive,
+	.request_reset = _cyttsp5_request_reset,
+	.request_restart = _cyttsp5_request_restart,
+	.request_sysinfo = _cyttsp5_request_sysinfo,
+	.request_loader_pdata = _cyttsp5_request_loader_pdata,
+	.request_stop_wd = _cyttsp5_request_stop_wd,
+	.request_start_wd = _cyttsp5_request_start_wd,
+	.request_get_hid_desc = _cyttsp5_request_get_hid_desc,
+	.request_get_mode = _cyttsp5_request_get_mode,
+#ifdef TTHE_TUNER_SUPPORT
+	.request_tthe_print = _cyttsp5_request_tthe_print,
+#endif
+	.nonhid_cmd = &_cyttsp5_core_nonhid_cmd,
+};
+
+struct cyttsp5_core_commands *cyttsp5_get_commands(void)
+{
+	return &_cyttsp5_core_commands;
+}
+EXPORT_SYMBOL_GPL(cyttsp5_get_commands);
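+
+/*
+ * Illustrative sketch (not part of the driver): a client obtains the
+ * command interface and subscribes for attention callbacks. Note that
+ * ids are matched by pointer, so the same id pointer must be passed to
+ * unsubscribe. The my_attention() callback and the CY_ATTEN_IRQ type
+ * are assumptions; the real enum values live in cyttsp5_regs.h.
+ *
+ *	static char *my_id = "my_module";
+ *
+ *	static int my_attention(struct device *dev)
+ *	{
+ *		...handle the interrupt-time callback...
+ *		return 0;
+ *	}
+ *
+ *	struct cyttsp5_core_commands *cmd = cyttsp5_get_commands();
+ *
+ *	rc = cmd->subscribe_attention(dev, CY_ATTEN_IRQ, my_id,
+ *			my_attention, CY_MODE_OPERATIONAL);
+ *	...
+ *	rc = cmd->unsubscribe_attention(dev, CY_ATTEN_IRQ, my_id,
+ *			my_attention, CY_MODE_OPERATIONAL);
+ */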
+
+static DEFINE_MUTEX(core_list_lock);
+static LIST_HEAD(core_list);
+static DEFINE_MUTEX(module_list_lock);
+static LIST_HEAD(module_list);
+static int core_number;
+
+static int cyttsp5_probe_module(struct cyttsp5_core_data *cd,
+		struct cyttsp5_module *module)
+{
+	struct module_node *module_node;
+	int rc = 0;
+
+	module_node = kzalloc(sizeof(*module_node), GFP_KERNEL);
+	if (!module_node)
+		return -ENOMEM;
+
+	module_node->module = module;
+
+	mutex_lock(&cd->module_list_lock);
+	list_add(&module_node->node, &cd->module_list);
+	mutex_unlock(&cd->module_list_lock);
+
+	rc = module->probe(cd->dev, &module_node->data);
+	if (rc) {
+		/*
+		 * Remove from the list when probe fails
+		 * in order not to call release
+		 */
+		mutex_lock(&cd->module_list_lock);
+		list_del(&module_node->node);
+		mutex_unlock(&cd->module_list_lock);
+		kfree(module_node);
+		goto exit;
+	}
+
+exit:
+	return rc;
+}
+
+static void cyttsp5_release_module(struct cyttsp5_core_data *cd,
+		struct cyttsp5_module *module)
+{
+	struct module_node *m, *m_n;
+
+	mutex_lock(&cd->module_list_lock);
+	list_for_each_entry_safe(m, m_n, &cd->module_list, node)
+		if (m->module == module) {
+			module->release(cd->dev, m->data);
+			list_del(&m->node);
+			kfree(m);
+			break;
+		}
+	mutex_unlock(&cd->module_list_lock);
+}
+
+static void cyttsp5_probe_modules(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_module *m;
+	int rc = 0;
+
+	mutex_lock(&module_list_lock);
+	list_for_each_entry(m, &module_list, node) {
+		rc = cyttsp5_probe_module(cd, m);
+		if (rc)
+			dev_err(cd->dev, "%s: Probe fails for module %s\n",
+				__func__, m->name);
+	}
+	mutex_unlock(&module_list_lock);
+}
+
+static void cyttsp5_release_modules(struct cyttsp5_core_data *cd)
+{
+	struct cyttsp5_module *m;
+
+	mutex_lock(&module_list_lock);
+	list_for_each_entry(m, &module_list, node)
+		cyttsp5_release_module(cd, m);
+	mutex_unlock(&module_list_lock);
+}
+
+struct cyttsp5_core_data *cyttsp5_get_core_data(char *id)
+{
+	struct cyttsp5_core_data *d;
+
+	list_for_each_entry(d, &core_list, node)
+		if (!strncmp(d->core_id, id, 20))
+			return d;
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(cyttsp5_get_core_data);
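+
+/*
+ * Example (illustrative): core ids are generated in cyttsp5_probe() as
+ * CYTTSP5_CORE_NAME followed by an index, so the first registered core
+ * would typically be fetched as below (the literal assumes
+ * CYTTSP5_CORE_NAME is "cyttsp5_core"):
+ *
+ *	struct cyttsp5_core_data *cd = cyttsp5_get_core_data("cyttsp5_core0");
+ */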
+
+static void cyttsp5_add_core(struct device *dev)
+{
+	struct cyttsp5_core_data *d;
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	mutex_lock(&core_list_lock);
+	list_for_each_entry(d, &core_list, node)
+		if (d->dev == dev)
+			goto unlock;
+
+	list_add(&cd->node, &core_list);
+unlock:
+	mutex_unlock(&core_list_lock);
+}
+
+static void cyttsp5_del_core(struct device *dev)
+{
+	struct cyttsp5_core_data *d, *d_n;
+
+	mutex_lock(&core_list_lock);
+	list_for_each_entry_safe(d, d_n, &core_list, node)
+		if (d->dev == dev) {
+			list_del(&d->node);
+			goto unlock;
+		}
+unlock:
+	mutex_unlock(&core_list_lock);
+}
+
+int cyttsp5_register_module(struct cyttsp5_module *module)
+{
+	struct cyttsp5_module *m;
+	struct cyttsp5_core_data *cd;
+
+	int rc = 0;
+
+	if (!module || !module->probe || !module->release)
+		return -EINVAL;
+
+	mutex_lock(&module_list_lock);
+	list_for_each_entry(m, &module_list, node)
+		if (m == module) {
+			rc = -EEXIST;
+			goto unlock;
+		}
+
+	list_add(&module->node, &module_list);
+
+	/* Probe the module for each core */
+	mutex_lock(&core_list_lock);
+	list_for_each_entry(cd, &core_list, node)
+		cyttsp5_probe_module(cd, module);
+	mutex_unlock(&core_list_lock);
+
+unlock:
+	mutex_unlock(&module_list_lock);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(cyttsp5_register_module);
+
+void cyttsp5_unregister_module(struct cyttsp5_module *module)
+{
+	struct cyttsp5_module *m, *m_n;
+	struct cyttsp5_core_data *cd;
+
+	if (!module)
+		return;
+
+	mutex_lock(&module_list_lock);
+
+	/* Release the module for each core */
+	mutex_lock(&core_list_lock);
+	list_for_each_entry(cd, &core_list, node)
+		cyttsp5_release_module(cd, module);
+	mutex_unlock(&core_list_lock);
+
+	list_for_each_entry_safe(m, m_n, &module_list, node)
+		if (m == module) {
+			list_del(&m->node);
+			break;
+		}
+
+	mutex_unlock(&module_list_lock);
+}
+EXPORT_SYMBOL_GPL(cyttsp5_unregister_module);
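+
+/*
+ * Illustrative sketch (not part of the driver): an add-on module
+ * registers a probe/release pair that is invoked once per core, as done
+ * by cyttsp5_probe_module() above. The prototypes below are inferred
+ * from that call site and the names are hypothetical; the authoritative
+ * definitions live in cyttsp5_regs.h.
+ *
+ *	static int my_probe(struct device *dev, void **data)
+ *	{
+ *		*data = NULL;	...allocate per-core context here...
+ *		return 0;
+ *	}
+ *
+ *	static void my_release(struct device *dev, void *data)
+ *	{
+ *		...free per-core context here...
+ *	}
+ *
+ *	static struct cyttsp5_module my_module = {
+ *		.name = "my_module",
+ *		.probe = my_probe,
+ *		.release = my_release,
+ *	};
+ *
+ *	rc = cyttsp5_register_module(&my_module);
+ */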
+
+void *cyttsp5_get_module_data(struct device *dev, struct cyttsp5_module *module)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct module_node *m;
+	void *data = NULL;
+
+	mutex_lock(&cd->module_list_lock);
+	list_for_each_entry(m, &cd->module_list, node)
+		if (m->module == module) {
+			data = m->data;
+			break;
+		}
+	mutex_unlock(&cd->module_list_lock);
+
+	return data;
+}
+EXPORT_SYMBOL(cyttsp5_get_module_data);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void cyttsp5_early_suspend(struct early_suspend *h)
+{
+	struct cyttsp5_core_data *cd =
+		container_of(h, struct cyttsp5_core_data, es);
+
+	call_atten_cb(cd, CY_ATTEN_SUSPEND, 0);
+}
+
+static void cyttsp5_late_resume(struct early_suspend *h)
+{
+	struct cyttsp5_core_data *cd =
+		container_of(h, struct cyttsp5_core_data, es);
+
+	call_atten_cb(cd, CY_ATTEN_RESUME, 0);
+}
+
+static void cyttsp5_setup_early_suspend(struct cyttsp5_core_data *cd)
+{
+	cd->es.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	cd->es.suspend = cyttsp5_early_suspend;
+	cd->es.resume = cyttsp5_late_resume;
+
+	register_early_suspend(&cd->es);
+}
+#elif defined(CONFIG_FB)
+static int fb_notifier_callback(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	struct cyttsp5_core_data *cd =
+		container_of(self, struct cyttsp5_core_data, fb_notifier);
+	struct fb_event *evdata = data;
+	int *blank;
+
+	if (event != FB_EVENT_BLANK || !evdata)
+		goto exit;
+
+	blank = evdata->data;
+	if (*blank == FB_BLANK_UNBLANK) {
+		dev_dbg(cd->dev, "%s: UNBLANK!\n", __func__);
+		if (cd->fb_state != FB_ON) {
+			#ifdef USE_FB_SUSPEND_RESUME
+			cyttsp5_core_resume(cd->dev);
+			cd->wake_initiated_by_device = 0;
+			#endif
+			call_atten_cb(cd, CY_ATTEN_RESUME, 0);
+			cd->fb_state = FB_ON;
+		}
+	} else if (*blank == FB_BLANK_POWERDOWN) {
+		dev_dbg(cd->dev, "%s: POWERDOWN!\n", __func__);
+		if (cd->fb_state != FB_OFF) {
+			#ifdef USE_FB_SUSPEND_RESUME
+			cyttsp5_core_suspend(cd->dev);
+			#endif
+			call_atten_cb(cd, CY_ATTEN_SUSPEND, 0);
+			cd->fb_state = FB_OFF;
+		}
+	}
+
+exit:
+	return 0;
+}
+
+static void cyttsp5_setup_fb_notifier(struct cyttsp5_core_data *cd)
+{
+	int rc;
+
+	cd->fb_state = FB_ON;
+
+	cd->fb_notifier.notifier_call = fb_notifier_callback;
+
+	rc = fb_register_client(&cd->fb_notifier);
+	if (rc)
+		dev_err(cd->dev, "Unable to register fb_notifier: %d\n", rc);
+}
+#endif
+
+static int cyttsp5_setup_irq_gpio(struct cyttsp5_core_data *cd)
+{
+	struct device *dev = cd->dev;
+	unsigned long irq_flags;
+	int rc;
+
+	/* Initialize IRQ */
+	cd->irq = gpio_to_irq(cd->cpdata->irq_gpio);
+	if (cd->irq < 0)
+		return -EINVAL;
+
+	cd->irq_enabled = true;
+
+	parade_debug(dev, DEBUG_LEVEL_1, "%s: initialize threaded irq=%d\n",
+		__func__, cd->irq);
+	if (cd->cpdata->level_irq_udelay > 0)
+		/* use level triggered interrupts */
+		irq_flags = IRQF_TRIGGER_LOW | IRQF_ONESHOT;
+	else
+		/* use edge triggered interrupts */
+		irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
+
+	rc = request_threaded_irq(cd->irq, NULL, cyttsp5_irq, irq_flags,
+		dev_name(dev), cd);
+	if (rc < 0)
+		dev_err(dev, "%s: Error, could not request irq\n", __func__);
+
+	return rc;
+}
+
+static int cyttsp5_power_init(struct cyttsp5_core_data *cd, bool on)
+{
+	int rc;
+
+	if (!on)
+		goto pwr_deinit;
+
+	cd->vdd = regulator_get(cd->dev, "vdd");
+	if (IS_ERR(cd->vdd)) {
+		rc = PTR_ERR(cd->vdd);
+		dev_err(cd->dev,
+			"Regulator get failed vdd rc=%d\n", rc);
+		return rc;
+	}
+
+	if (regulator_count_voltages(cd->vdd) > 0) {
+		rc = regulator_set_voltage(cd->vdd, FT_VTG_MIN_UV,
+					FT_VTG_MAX_UV);
+		if (rc) {
+			dev_err(cd->dev,
+				"Regulator set_vtg failed vdd rc=%d\n", rc);
+			goto reg_vdd_put;
+		}
+	}
+
+	cd->vcc_i2c = regulator_get(cd->dev, "vcc_i2c");
+	if (IS_ERR(cd->vcc_i2c)) {
+		rc = PTR_ERR(cd->vcc_i2c);
+		dev_err(cd->dev,
+			"Regulator get failed vcc_i2c rc=%d\n", rc);
+		goto reg_vdd_set_vtg;
+	}
+
+	if (regulator_count_voltages(cd->vcc_i2c) > 0) {
+		rc = regulator_set_voltage(cd->vcc_i2c, FT_I2C_VTG_MIN_UV,
+					FT_I2C_VTG_MAX_UV);
+		if (rc) {
+			dev_err(cd->dev,
+			"Regulator set_vtg failed vcc_i2c rc=%d\n", rc);
+			goto reg_vcc_i2c_put;
+		}
+	}
+
+	return 0;
+
+reg_vcc_i2c_put:
+	regulator_put(cd->vcc_i2c);
+reg_vdd_set_vtg:
+	if (regulator_count_voltages(cd->vdd) > 0)
+		regulator_set_voltage(cd->vdd, 0, FT_VTG_MAX_UV);
+reg_vdd_put:
+	regulator_put(cd->vdd);
+	return rc;
+
+pwr_deinit:
+	if (regulator_count_voltages(cd->vdd) > 0)
+		regulator_set_voltage(cd->vdd, 0, FT_VTG_MAX_UV);
+
+	regulator_put(cd->vdd);
+
+	if (regulator_count_voltages(cd->vcc_i2c) > 0)
+		regulator_set_voltage(cd->vcc_i2c, 0, FT_I2C_VTG_MAX_UV);
+
+	regulator_put(cd->vcc_i2c);
+	return 0;
+}
+
+static int cyttsp5_ts_pinctrl_init(struct cyttsp5_core_data *cd)
+{
+	int retval;
+
+	/* Get pinctrl if target uses pinctrl */
+	cd->ts_pinctrl = devm_pinctrl_get(cd->dev);
+	if (IS_ERR_OR_NULL(cd->ts_pinctrl)) {
+		retval = PTR_ERR(cd->ts_pinctrl);
+		dev_dbg(cd->dev,
+			"Target does not use pinctrl %d\n", retval);
+		goto err_pinctrl_get;
+	}
+
+	cd->pinctrl_state_active
+		= pinctrl_lookup_state(cd->ts_pinctrl,
+				PINCTRL_STATE_ACTIVE);
+	if (IS_ERR_OR_NULL(cd->pinctrl_state_active)) {
+		retval = PTR_ERR(cd->pinctrl_state_active);
+		dev_err(cd->dev,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_ACTIVE, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	cd->pinctrl_state_suspend
+		= pinctrl_lookup_state(cd->ts_pinctrl,
+			PINCTRL_STATE_SUSPEND);
+	if (IS_ERR_OR_NULL(cd->pinctrl_state_suspend)) {
+		retval = PTR_ERR(cd->pinctrl_state_suspend);
+		dev_err(cd->dev,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_SUSPEND, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	cd->pinctrl_state_release
+		= pinctrl_lookup_state(cd->ts_pinctrl,
+			PINCTRL_STATE_RELEASE);
+	if (IS_ERR_OR_NULL(cd->pinctrl_state_release)) {
+		retval = PTR_ERR(cd->pinctrl_state_release);
+		dev_dbg(cd->dev,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_RELEASE, retval);
+	}
+
+	return 0;
+
+err_pinctrl_lookup:
+	devm_pinctrl_put(cd->ts_pinctrl);
+err_pinctrl_get:
+	cd->ts_pinctrl = NULL;
+	return retval;
+}
+
+int cyttsp5_probe(const struct cyttsp5_bus_ops *ops, struct device *dev,
+		u16 irq, size_t xfer_buf_size)
+{
+	struct cyttsp5_core_data *cd;
+	struct cyttsp5_platform_data *pdata = dev_get_platdata(dev);
+	enum cyttsp5_atten_type type;
+	struct i2c_client *client = to_i2c_client(dev);
+	int rc = 0;
+
+	/* Set default values on first probe */
+	if (cyttsp5_first_probe) {
+		cyttsp5_first_probe      = false;
+		is_cyttsp5_probe_success = false;
+		cyttsp5_bus_ops_save     = NULL;
+	}
+
+	if (!pdata || !pdata->core_pdata || !pdata->mt_pdata) {
+		dev_err(dev, "%s: Missing platform data\n", __func__);
+		rc = -ENODEV;
+		goto error_no_pdata;
+	}
+
+	if (pdata->core_pdata->flags & CY_CORE_FLAG_POWEROFF_ON_SLEEP) {
+		if (!pdata->core_pdata->power) {
+			dev_err(dev, "%s: Missing platform data function\n",
+					__func__);
+			rc = -ENODEV;
+			goto error_no_pdata;
+		}
+	}
+
+	/* get context and debug print buffers */
+	cd = devm_kzalloc(dev, sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		rc = -ENOMEM;
+		goto error_alloc_data;
+	}
+
+	/* Initialize device info */
+	cd->dev = dev;
+	cd->pdata = pdata;
+	cd->cpdata = pdata->core_pdata;
+	cd->bus_ops = ops;
+	cd->debug_level = CY_INITIAL_DEBUG_LEVEL;
+	cd->watchdog_interval = CY_WATCHDOG_TIMEOUT;
+	cd->show_timestamp = CY_INITIAL_SHOW_TIME_STAMP;
+	scnprintf(cd->core_id, 20, "%s%d", CYTTSP5_CORE_NAME, core_number++);
+
+	/* Initialize mutexes and spinlocks */
+	mutex_init(&cd->module_list_lock);
+	mutex_init(&cd->system_lock);
+	mutex_init(&cd->adap_lock);
+	mutex_init(&cd->hid_report_lock);
+	spin_lock_init(&cd->spinlock);
+
+	/* Initialize module list */
+	INIT_LIST_HEAD(&cd->module_list);
+
+	/* Initialize attention lists */
+	for (type = 0; type < CY_ATTEN_NUM_ATTEN; type++)
+		INIT_LIST_HEAD(&cd->atten_list[type]);
+
+	/* Initialize parameter list */
+	INIT_LIST_HEAD(&cd->param_list);
+
+	/* Initialize wait queue */
+	init_waitqueue_head(&cd->wait_q);
+
+	rc = cyttsp5_ts_pinctrl_init(cd);
+	if (!rc && cd->ts_pinctrl) {
+		/*
+		 * The pinctrl handle is optional. If one is found, put the
+		 * pins into the active state; if not, continue without
+		 * error.
+		 */
+		rc = pinctrl_select_state(cd->ts_pinctrl,
+					cd->pinctrl_state_active);
+		if (rc < 0)
+			dev_err(&client->dev,
+				"failed to select active pin state\n");
+	}
+
+	rc = cyttsp5_power_init(cd, true);
+	if (rc < 0)
+		dev_err(&client->dev, "cyttsp5_power_init failed\n");
+
+	rc = regulator_enable(cd->vdd);
+	if (rc) {
+		dev_err(dev, "Regulator vdd enable failed rc=%d\n", rc);
+		goto error_power;
+	}
+
+	rc = regulator_enable(cd->vcc_i2c);
+	if (rc) {
+		dev_err(dev, "Regulator vcc_i2c enable failed rc=%d\n", rc);
+		regulator_disable(cd->vdd);
+		goto error_power;
+	}
+
+	/* Initialize works */
+	INIT_WORK(&cd->startup_work, cyttsp5_startup_work_function);
+	INIT_WORK(&cd->watchdog_work, cyttsp5_watchdog_work);
+
+	/* Initialize HID specific data */
+	cd->hid_core.hid_vendor_id = (cd->cpdata->vendor_id) ?
+		cd->cpdata->vendor_id : CY_HID_VENDOR_ID;
+	cd->hid_core.hid_product_id = (cd->cpdata->product_id) ?
+		cd->cpdata->product_id : CY_HID_APP_PRODUCT_ID;
+	cd->hid_core.hid_desc_register =
+		cpu_to_le16(cd->cpdata->hid_desc_register);
+
+	/* Set platform easywake value */
+	cd->easy_wakeup_gesture = cd->cpdata->easy_wakeup_gesture;
+
+	/* Set Panel ID to Not Enabled */
+	cd->panel_id = PANEL_ID_NOT_ENABLED;
+
+	dev_set_drvdata(dev, cd);
+	cyttsp5_add_core(dev);
+
+	/* create the ttdl_restart sysfs node if a previous probe failed */
+	if (!is_cyttsp5_probe_success)
+		device_create_file(dev, &dev_attr_ttdl_restart);
+
+	/*
+	 * Save the pointer to a global value, which will be used
+	 * in ttdl_restart function
+	 */
+	cyttsp5_bus_ops_save = ops;
+
+	/* Call platform init function */
+	if (cd->cpdata->init) {
+		parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: Init HW\n", __func__);
+		rc = cd->cpdata->init(cd->cpdata, 1, cd->dev);
+	} else {
+		dev_info(cd->dev, "%s: No HW INIT function\n", __func__);
+		rc = 0;
+	}
+	if (rc < 0)
+		dev_err(cd->dev, "%s: HW Init fail r=%d\n", __func__, rc);
+
+	/* Call platform detect function */
+	if (cd->cpdata->detect) {
+		dev_info(cd->dev, "%s: Detect HW\n", __func__);
+		rc = cd->cpdata->detect(cd->cpdata, cd->dev,
+				cyttsp5_platform_detect_read);
+		if (rc) {
+			dev_info(cd->dev, "%s: No HW detected\n", __func__);
+			rc = -ENODEV;
+			goto error_detect;
+		}
+	}
+
+	/* Setup watchdog timer */
+	setup_timer(&cd->watchdog_timer, cyttsp5_watchdog_timer,
+			(unsigned long)cd);
+
+	rc = cyttsp5_setup_irq_gpio(cd);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error, could not setup IRQ\n", __func__);
+		goto error_setup_irq;
+	}
+
+	parade_debug(dev, DEBUG_LEVEL_1, "%s: add sysfs interfaces\n",
+		__func__);
+	rc = add_sysfs_interfaces(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error, fail sysfs init\n", __func__);
+		goto error_attr_create;
+	}
+
+#ifdef TTHE_TUNER_SUPPORT
+	mutex_init(&cd->tthe_lock);
+	cd->tthe_debugfs = debugfs_create_file(CYTTSP5_TTHE_TUNER_FILE_NAME,
+			0644, NULL, cd, &tthe_debugfs_fops);
+#endif
+	rc = device_init_wakeup(dev, 1);
+	if (rc < 0)
+		dev_err(dev, "%s: Error, device_init_wakeup rc:%d\n",
+				__func__, rc);
+
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	/*
+	 * call startup directly to ensure that the device
+	 * is tested before leaving the probe
+	 */
+	parade_debug(dev, DEBUG_LEVEL_1, "%s: call startup\n", __func__);
+	rc = cyttsp5_startup(cd, false);
+
+	pm_runtime_put_sync(dev);
+
+	/* Do not fail probe if startup fails but the device is detected */
+	if (rc == -ENODEV) {
+		dev_err(cd->dev, "%s: Fail initial startup r=%d\n",
+			__func__, rc);
+		goto error_startup;
+	}
+
+	rc = cyttsp5_mt_probe(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error, fail mt probe\n", __func__);
+		goto error_startup;
+	}
+
+	rc = cyttsp5_btn_probe(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error, fail btn probe\n", __func__);
+		goto error_startup_mt;
+	}
+
+	rc = cyttsp5_proximity_probe(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error, fail proximity probe\n", __func__);
+		goto error_startup_btn;
+	}
+
+	/* Probe registered modules */
+	cyttsp5_probe_modules(cd);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	cyttsp5_setup_early_suspend(cd);
+#elif defined(CONFIG_FB)
+	cyttsp5_setup_fb_notifier(cd);
+#endif
+
+#ifdef NEED_SUSPEND_NOTIFIER
+	cd->pm_notifier.notifier_call = cyttsp5_pm_notifier;
+	register_pm_notifier(&cd->pm_notifier);
+#endif
+	is_cyttsp5_probe_success = true;
+	return 0;
+
+error_startup_btn:
+	cyttsp5_btn_release(dev);
+error_startup_mt:
+	cyttsp5_mt_release(dev);
+error_startup:
+	pm_runtime_disable(dev);
+	device_init_wakeup(dev, 0);
+	cancel_work_sync(&cd->startup_work);
+	cyttsp5_stop_wd_timer(cd);
+	cyttsp5_free_si_ptrs(cd);
+	remove_sysfs_interfaces(dev);
+error_attr_create:
+	free_irq(cd->irq, cd);
+	del_timer(&cd->watchdog_timer);
+error_setup_irq:
+error_detect:
+	if (cd->cpdata->init)
+		cd->cpdata->init(cd->cpdata, 0, dev);
+	cyttsp5_del_core(dev);
+	dev_set_drvdata(dev, NULL);
+error_power:
+	/* cd is devm-managed; it must not be freed here */
+error_alloc_data:
+error_no_pdata:
+	dev_err(dev, "%s failed.\n", __func__);
+	is_cyttsp5_probe_success = false;
+	return rc;
+}
+EXPORT_SYMBOL_GPL(cyttsp5_probe);
+
+int cyttsp5_release(struct cyttsp5_core_data *cd)
+{
+	struct device *dev = cd->dev;
+
+	/* Release successfully probed modules */
+	cyttsp5_release_modules(cd);
+
+	cyttsp5_proximity_release(dev);
+	cyttsp5_btn_release(dev);
+	cyttsp5_mt_release(dev);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&cd->es);
+#elif defined(CONFIG_FB)
+	fb_unregister_client(&cd->fb_notifier);
+#endif
+
+#ifdef NEED_SUSPEND_NOTIFIER
+	unregister_pm_notifier(&cd->pm_notifier);
+#endif
+
+	/*
+	 * Suspend the device before freeing the startup_work and stopping
+	 * the watchdog since sleep function restarts watchdog on failure
+	 */
+	pm_runtime_suspend(dev);
+	pm_runtime_disable(dev);
+
+	cancel_work_sync(&cd->startup_work);
+
+	cyttsp5_stop_wd_timer(cd);
+
+	device_init_wakeup(dev, 0);
+
+#ifdef TTHE_TUNER_SUPPORT
+	mutex_lock(&cd->tthe_lock);
+	cd->tthe_exit = 1;
+	wake_up(&cd->wait_q);
+	mutex_unlock(&cd->tthe_lock);
+	debugfs_remove(cd->tthe_debugfs);
+#endif
+	remove_sysfs_interfaces(dev);
+	free_irq(cd->irq, cd);
+	if (cd->cpdata->init)
+		cd->cpdata->init(cd->cpdata, 0, dev);
+	dev_set_drvdata(dev, NULL);
+	cyttsp5_del_core(dev);
+	cyttsp5_free_si_ptrs(cd);
+	cyttsp5_free_hid_reports(cd);
+	/* cd is devm-managed and is freed when the device is released */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cyttsp5_release);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parade TrueTouch(R) Standard Product Core Driver");
+MODULE_AUTHOR("Parade Technologies <ttdrivers@paradetech.com>");
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_debug.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_debug.c
new file mode 100644
index 0000000..698a144
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_debug.c
@@ -0,0 +1,391 @@
+/*
+ * cyttsp5_debug.c
+ * Parade TrueTouch(TM) Standard Product V5 Debug Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include "cyttsp5_regs.h"
+
+#define CYTTSP5_DEBUG_NAME "cyttsp5_debug"
+
+struct cyttsp5_debug_data {
+	struct device *dev;
+	struct cyttsp5_sysinfo *si;
+	uint32_t interrupt_count;
+	uint32_t formated_output;
+	struct mutex sysfs_lock;
+	u8 pr_buf[CY_MAX_PRBUF_SIZE];
+};
+
+static struct cyttsp5_core_commands *cmd;
+
+static struct cyttsp5_module debug_module;
+
+static inline struct cyttsp5_debug_data *cyttsp5_get_debug_data(
+		struct device *dev)
+{
+	return cyttsp5_get_module_data(dev, &debug_module);
+}
+
+/*
+ * This function provides combined output of xy_mode and xy_data.
+ * Required by TTHE.
+ */
+static void cyttsp5_pr_buf_op_mode(struct cyttsp5_debug_data *dd, u8 *pr_buf,
+		struct cyttsp5_sysinfo *si, u8 cur_touch)
+{
+	const char fmt[] = "%02X ";
+	int max = (CY_MAX_PRBUF_SIZE - 1) - sizeof(CY_PR_TRUNCATED);
+	u8 report_id = si->xy_mode[2];
+	int header_size = 0;
+	int report_size = 0;
+	int total_size = 0;
+	int i, k;
+
+	if (report_id == si->desc.tch_report_id) {
+		header_size = si->desc.tch_header_size;
+		report_size = cur_touch * si->desc.tch_record_size;
+	} else if (report_id == si->desc.btn_report_id) {
+		header_size = BTN_INPUT_HEADER_SIZE;
+		report_size = BTN_REPORT_SIZE;
+	}
+	total_size = header_size + report_size;
+
+	pr_buf[0] = 0;
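+	/* Render each byte as "XX " (3 chars), stopping at the buffer limit */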
+	for (i = k = 0; i < header_size && k < max; i++, k += 3)
+		scnprintf(pr_buf + k, CY_MAX_PRBUF_SIZE - k, fmt, si->xy_mode[i]);
+
+	for (i = 0; i < report_size && k < max; i++, k += 3)
+		scnprintf(pr_buf + k, CY_MAX_PRBUF_SIZE - k, fmt, si->xy_data[i]);
+
+	pr_info("%s=%s%s\n", "cyttsp5_OpModeData", pr_buf,
+			total_size * 3 <= max ? "" : CY_PR_TRUNCATED);
+}
+
+static void cyttsp5_debug_print(struct device *dev, u8 *pr_buf, u8 *sptr,
+		int size, const char *data_name)
+{
+	int i, j;
+	int elem_size = sizeof("XX ") - 1;
+	int max = (CY_MAX_PRBUF_SIZE - 1) / elem_size;
+	int limit = size < max ? size : max;
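+	/* limit caps the dump at the number of 3-char elements pr_buf holds */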
+
+	if (limit < 0)
+		limit = 0;
+
+	pr_buf[0] = 0;
+	for (i = j = 0; i < limit; i++, j += elem_size)
+		scnprintf(pr_buf + j, CY_MAX_PRBUF_SIZE - j, "%02X ", sptr[i]);
+
+	if (size)
+		pr_info("%s[0..%d]=%s%s\n", data_name, size - 1, pr_buf,
+			size <= max ? "" : CY_PR_TRUNCATED);
+	else
+		pr_info("%s[]\n", data_name);
+}
+
+static void cyttsp5_debug_formated(struct device *dev, u8 *pr_buf,
+		struct cyttsp5_sysinfo *si, u8 num_cur_tch)
+{
+	u8 report_id = si->xy_mode[2];
+	int header_size = 0;
+	int report_size = 0;
+	u8 data_name[] = "touch[99]";
+	int max_print_length = 20;
+	int i;
+
+	if (report_id == si->desc.tch_report_id) {
+		header_size = si->desc.tch_header_size;
+		report_size = num_cur_tch * si->desc.tch_record_size;
+	} else if (report_id == si->desc.btn_report_id) {
+		header_size = BTN_INPUT_HEADER_SIZE;
+		report_size = BTN_REPORT_SIZE;
+	}
+
+	/* xy_mode */
+	cyttsp5_debug_print(dev, pr_buf, si->xy_mode, header_size, "xy_mode");
+
+	/* xy_data */
+	if (report_size > max_print_length) {
+		pr_info("xy_data[0..%d]:\n", report_size - 1);
+		for (i = 0; i < report_size - max_print_length;
+				i += max_print_length)
+			cyttsp5_debug_print(dev, pr_buf, si->xy_data + i,
+					max_print_length, " ");
+
+		if (report_size - i)
+			cyttsp5_debug_print(dev, pr_buf, si->xy_data + i,
+					report_size - i, " ");
+	} else
+		cyttsp5_debug_print(dev, pr_buf, si->xy_data, report_size,
+				"xy_data");
+
+
+	/* touches */
+	if (report_id == si->desc.tch_report_id)
+		for (i = 0; i < num_cur_tch; i++) {
+			scnprintf(data_name, sizeof(data_name) - 1,
+					"touch[%u]", i);
+			cyttsp5_debug_print(dev, pr_buf,
+				si->xy_data + (i * si->desc.tch_record_size),
+				si->desc.tch_record_size, data_name);
+		}
+
+	/* buttons */
+	if (report_id == si->desc.btn_report_id)
+		cyttsp5_debug_print(dev, pr_buf, si->xy_data, report_size,
+				"button");
+}
+
+/* read xy_data for all touches for debug */
+static int cyttsp5_xy_worker(struct cyttsp5_debug_data *dd)
+{
+	struct device *dev = dd->dev;
+	struct cyttsp5_sysinfo *si = dd->si;
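+	/* The current touch count is encoded in the xy_mode report header */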
+	u8 report_reg = si->xy_mode[TOUCH_COUNT_BYTE_OFFSET];
+	u8 num_cur_tch = GET_NUM_TOUCHES(report_reg);
+	uint32_t formated_output;
+
+	mutex_lock(&dd->sysfs_lock);
+	dd->interrupt_count++;
+	formated_output = dd->formated_output;
+	mutex_unlock(&dd->sysfs_lock);
+
+	/* Interrupt */
+	pr_info("Interrupt(%u)\n", dd->interrupt_count);
+
+	if (formated_output)
+		cyttsp5_debug_formated(dev, dd->pr_buf, si, num_cur_tch);
+	else
+		/* print data for TTHE */
+		cyttsp5_pr_buf_op_mode(dd, dd->pr_buf, si, num_cur_tch);
+
+	pr_info("\n");
+
+	return 0;
+}
+
+static int cyttsp5_debug_attention(struct device *dev)
+{
+	struct cyttsp5_debug_data *dd = cyttsp5_get_debug_data(dev);
+	struct cyttsp5_sysinfo *si = dd->si;
+	u8 report_id = si->xy_mode[2];
+	int rc = 0;
+
+	if (report_id != si->desc.tch_report_id
+			&& report_id != si->desc.btn_report_id)
+		return 0;
+
+	/* core handles handshake */
+	rc = cyttsp5_xy_worker(dd);
+	if (rc < 0)
+		dev_err(dev, "%s: xy_worker error r=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static ssize_t cyttsp5_interrupt_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_debug_data *dd = cyttsp5_get_debug_data(dev);
+	int val;
+
+	mutex_lock(&dd->sysfs_lock);
+	val = dd->interrupt_count;
+	mutex_unlock(&dd->sysfs_lock);
+
+	return scnprintf(buf, CY_MAX_PRBUF_SIZE, "Interrupt Count: %d\n", val);
+}
+
+static ssize_t cyttsp5_interrupt_count_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_debug_data *dd = cyttsp5_get_debug_data(dev);
+
+	mutex_lock(&dd->sysfs_lock);
+	dd->interrupt_count = 0;
+	mutex_unlock(&dd->sysfs_lock);
+	return size;
+}
+
+static DEVICE_ATTR(int_count, 0600,
+	cyttsp5_interrupt_count_show, cyttsp5_interrupt_count_store);
+
+static ssize_t cyttsp5_formated_output_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_debug_data *dd = cyttsp5_get_debug_data(dev);
+	int val;
+
+	mutex_lock(&dd->sysfs_lock);
+	val = dd->formated_output;
+	mutex_unlock(&dd->sysfs_lock);
+
+	return scnprintf(buf, CY_MAX_PRBUF_SIZE,
+			"Formatted debug output: %x\n", val);
+}
+
+static ssize_t cyttsp5_formated_output_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_debug_data *dd = cyttsp5_get_debug_data(dev);
+	unsigned long value;
+	int rc;
+
+	rc = kstrtoul(buf, 10, &value);
+	if (rc < 0) {
+		dev_err(dev, "%s: Invalid value\n", __func__);
+		return size;
+	}
+
+	/* Expecting only 0 or 1 */
+	if (value != 0 && value != 1) {
+		dev_err(dev, "%s: Invalid value %lu\n", __func__, value);
+		return size;
+	}
+
+	mutex_lock(&dd->sysfs_lock);
+	dd->formated_output = value;
+	mutex_unlock(&dd->sysfs_lock);
+	return size;
+}
+
+static DEVICE_ATTR(formated_output, 0600,
+	cyttsp5_formated_output_show, cyttsp5_formated_output_store);
+
+static int cyttsp5_debug_probe(struct device *dev, void **data)
+{
+	struct cyttsp5_debug_data *dd;
+	int rc;
+
+	/* get context and debug print buffers */
+	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+	if (!dd) {
+		rc = -ENOMEM;
+		goto cyttsp5_debug_probe_alloc_failed;
+	}
+
+	rc = device_create_file(dev, &dev_attr_int_count);
+	if (rc) {
+		dev_err(dev, "%s: Error, could not create int_count\n",
+				__func__);
+		goto cyttsp5_debug_probe_create_int_count_failed;
+	}
+
+	rc = device_create_file(dev, &dev_attr_formated_output);
+	if (rc) {
+		dev_err(dev, "%s: Error, could not create formated_output\n",
+				__func__);
+		goto cyttsp5_debug_probe_create_formated_failed;
+	}
+
+	mutex_init(&dd->sysfs_lock);
+	dd->dev = dev;
+	*data = dd;
+
+	dd->si = cmd->request_sysinfo(dev);
+	if (!dd->si) {
+		dev_err(dev, "%s: Fail get sysinfo pointer from core\n",
+				__func__);
+		rc = -ENODEV;
+		goto cyttsp5_debug_probe_sysinfo_failed;
+	}
+
+	rc = cmd->subscribe_attention(dev, CY_ATTEN_IRQ, CYTTSP5_DEBUG_NAME,
+		cyttsp5_debug_attention, CY_MODE_OPERATIONAL);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error, could not subscribe attention cb\n",
+				__func__);
+		goto cyttsp5_debug_probe_subscribe_failed;
+	}
+
+	return 0;
+
+cyttsp5_debug_probe_subscribe_failed:
+cyttsp5_debug_probe_sysinfo_failed:
+	device_remove_file(dev, &dev_attr_formated_output);
+cyttsp5_debug_probe_create_formated_failed:
+	device_remove_file(dev, &dev_attr_int_count);
+cyttsp5_debug_probe_create_int_count_failed:
+	kfree(dd);
+cyttsp5_debug_probe_alloc_failed:
+	dev_err(dev, "%s failed.\n", __func__);
+	return rc;
+}
+
+static void cyttsp5_debug_release(struct device *dev, void *data)
+{
+	struct cyttsp5_debug_data *dd = data;
+	int rc;
+
+	rc = cmd->unsubscribe_attention(dev, CY_ATTEN_IRQ, CYTTSP5_DEBUG_NAME,
+		cyttsp5_debug_attention, CY_MODE_OPERATIONAL);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error, could not un-subscribe attention\n",
+				__func__);
+		goto cyttsp5_debug_release_exit;
+	}
+
+cyttsp5_debug_release_exit:
+	device_remove_file(dev, &dev_attr_formated_output);
+	device_remove_file(dev, &dev_attr_int_count);
+	kfree(dd);
+}
+
+static struct cyttsp5_module debug_module = {
+	.name = CYTTSP5_DEBUG_NAME,
+	.probe = cyttsp5_debug_probe,
+	.release = cyttsp5_debug_release,
+};
+
+static int __init cyttsp5_debug_init(void)
+{
+	int rc;
+
+	cmd = cyttsp5_get_commands();
+	if (!cmd)
+		return -EINVAL;
+
+	rc = cyttsp5_register_module(&debug_module);
+	if (rc < 0) {
+		pr_err("%s: Error, failed registering module\n", __func__);
+		return rc;
+	}
+
+	pr_info("%s: Parade TTSP Debug Driver (Built %s) rc=%d\n",
+		__func__, CY_DRIVER_VERSION, rc);
+	return 0;
+}
+module_init(cyttsp5_debug_init);
+
+static void __exit cyttsp5_debug_exit(void)
+{
+	cyttsp5_unregister_module(&debug_module);
+}
+module_exit(cyttsp5_debug_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parade TrueTouch(R) Standard Product Debug Driver");
+MODULE_AUTHOR("Parade Technologies <ttdrivers@paradetech.com>");
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_device_access.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_device_access.c
new file mode 100644
index 0000000..2cdea6f
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_device_access.c
@@ -0,0 +1,5220 @@
+/*
+ * cyttsp5_device_access.c
+ * Parade TrueTouch(TM) Standard Product V5 Device Access Module.
+ * Configuration and Test command/status user interface.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/timer.h>
+#include <linux/timex.h>
+#include <linux/rtc.h>
+
+#include "cyttsp5_regs.h"
+
+#define CY_CMCP_THRESHOLD_FILE_NAME "cyttsp5_thresholdfile.csv"
+#define CMCP_THRESHOLD_FILE_NAME "ttdl_cmcp_thresholdfile.csv"
+
+/* Max test case number */
+#define MAX_CASE_NUM            (22)
+
+/* ASCII */
+#define ASCII_LF                (0x0A)
+#define ASCII_CR                (0x0D)
+#define ASCII_COMMA             (0x2C)
+#define ASCII_ZERO              (0x30)
+#define ASCII_NINE              (0x39)
+
+/* Max characters of test case name */
+#define NAME_SIZE_MAX           (50)
+
+/* Max sensor and button number */
+#define MAX_BUTTONS             (HID_SYSINFO_MAX_BTN)
+#define MAX_SENSORS             (1024)
+#define MAX_TX_SENSORS          (128)
+#define MAX_RX_SENSORS          (128)
+
+/* Multiply by 2 for double (min, max) values */
+#define TABLE_BUTTON_MAX_SIZE   (MAX_BUTTONS * 2)
+#define TABLE_SENSOR_MAX_SIZE   (MAX_SENSORS * 2)
+#define TABLE_TX_MAX_SIZE       (MAX_TX_SENSORS * 2)
+#define TABLE_RX_MAX_SIZE       (MAX_RX_SENSORS * 2)
+
+#define CM_PANEL_DATA_OFFSET    (6)
+#define CM_BTN_DATA_OFFSET      (6)
+#define CP_PANEL_DATA_OFFSET    (6)
+#define CP_BTN_DATA_OFFSET      (6)
+#define MAX_BUF_LEN             (50000)
+
+#define CY_ERROR0 ",Self-cap Calibration Check - Button Element by Element,"
+
+/* cmcp csv file information */
+struct configuration {
+	u32 cm_range_limit_row;
+	u32 cm_range_limit_col;
+	u32 cm_min_limit_cal;
+	u32 cm_max_limit_cal;
+	u32 cm_max_delta_sensor_percent;
+	u32 cm_max_delta_button_percent;
+	u32 min_sensor_rx;
+	u32 max_sensor_rx;
+	u32 min_sensor_tx;
+	u32 max_sensor_tx;
+	u32 min_button;
+	u32 max_button;
+	u32 max_delta_sensor;
+	u32 cp_max_delta_sensor_rx_percent;
+	u32 cp_max_delta_sensor_tx_percent;
+	u32 cm_min_max_table_button[TABLE_BUTTON_MAX_SIZE];
+	u32 cp_min_max_table_button[TABLE_BUTTON_MAX_SIZE];
+	u32 cm_min_max_table_sensor[TABLE_SENSOR_MAX_SIZE];
+	u32 cp_min_max_table_rx[TABLE_RX_MAX_SIZE];
+	u32 cp_min_max_table_tx[TABLE_TX_MAX_SIZE];
+	u32 cm_min_max_table_button_size;
+	u32 cp_min_max_table_button_size;
+	u32 cm_min_max_table_sensor_size;
+	u32 cp_min_max_table_rx_size;
+	u32 cp_min_max_table_tx_size;
+	u32 cp_max_delta_button_percent;
+	u32 cm_max_table_gradient_cols_percent[TABLE_TX_MAX_SIZE];
+	u32 cm_max_table_gradient_cols_percent_size;
+	u32 cm_max_table_gradient_rows_percent[TABLE_RX_MAX_SIZE];
+	u32 cm_max_table_gradient_rows_percent_size;
+	u32 cm_excluding_row_edge;
+	u32 cm_excluding_col_edge;
+	u32 rx_num;
+	u32 tx_num;
+	u32 btn_num;
+	u32 cm_enabled;
+	u32 cp_enabled;
+	u32 is_valid_or_not;
+};
+
+/* Test case search definition */
+struct test_case_search {
+	char name[NAME_SIZE_MAX]; /* Test case name */
+	u32 name_size;            /* Test case name size */
+	u32 offset;               /* Test case offset */
+};
+
+/* Test case field definition */
+struct test_case_field {
+	char *name;     /* Test case name */
+	u32 name_size;  /* Test case name size */
+	u32 type;       /* Test case type */
+	u32 *bufptr;    /* Buffer to store value information */
+	u32 exist_or_not; /* Whether the test case exists */
+	u32 data_num;   /* Buffer data number */
+	u32 line_num;   /* Buffer line number */
+};
+
+/* Test case type */
+enum test_case_type {
+	TEST_CASE_TYPE_NO,
+	TEST_CASE_TYPE_ONE,
+	TEST_CASE_TYPE_MUL,
+	TEST_CASE_TYPE_MUL_LINES,
+};
+
+/* Test case order in test_case_field_array */
+enum case_order {
+	CM_TEST_INPUTS,
+	CM_EXCLUDING_COL_EDGE,
+	CM_EXCLUDING_ROW_EDGE,
+	CM_GRADIENT_CHECK_COL,
+	CM_GRADIENT_CHECK_ROW,
+	CM_RANGE_LIMIT_ROW,
+	CM_RANGE_LIMIT_COL,
+	CM_MIN_LIMIT_CAL,
+	CM_MAX_LIMIT_CAL,
+	CM_MAX_DELTA_SENSOR_PERCENT,
+	CM_MAX_DELTA_BUTTON_PERCENT,
+	PER_ELEMENT_MIN_MAX_TABLE_BUTTON,
+	PER_ELEMENT_MIN_MAX_TABLE_SENSOR,
+	CP_TEST_INPUTS,
+	CP_MAX_DELTA_SENSOR_RX_PERCENT,
+	CP_MAX_DELTA_SENSOR_TX_PERCENT,
+	CP_MAX_DELTA_BUTTON_PERCENT,
+	CP_PER_ELEMENT_MIN_MAX_BUTTON,
+	MIN_BUTTON,
+	MAX_BUTTON,
+	PER_ELEMENT_MIN_MAX_RX,
+	PER_ELEMENT_MIN_MAX_TX,
+	CASE_ORDER_MAX,
+};
+
+enum cmcp_test_item {
+	CMCP_FULL = 0,
+	CMCP_CM_PANEL,
+	CMCP_CP_PANEL,
+	CMCP_CM_BTN,
+	CMCP_CP_BTN,
+};
+
+#define CM_ENABLED 0x10
+#define CP_ENABLED 0x20
+#define CM_PANEL (0x01 | CM_ENABLED)
+#define CP_PANEL (0x02 | CP_ENABLED)
+#define CM_BTN (0x04 | CM_ENABLED)
+#define CP_BTN (0x08 | CP_ENABLED)
+#define CMCP_FULL_CASE \
+	(CM_PANEL | CP_PANEL | CM_BTN | CP_BTN | CM_ENABLED | CP_ENABLED)
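+/*
+ * Bits 0-3 select the individual tests; the CM_ENABLED/CP_ENABLED bits
+ * gate the Cm and Cp validation passes
+ */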
+
+#define CYTTSP5_DEVICE_ACCESS_NAME "cyttsp5_device_access"
+#define CYTTSP5_INPUT_ELEM_SZ (sizeof("0xHH") + 1)
+
+#define STATUS_SUCCESS     0
+#define STATUS_FAIL        (-1)
+#define PIP_CMD_MAX_LENGTH ((1 << 16) - 1)
+
+#ifdef TTHE_TUNER_SUPPORT
+struct heatmap_param {
+	bool scan_start;
+	enum scan_data_type_list data_type; /* raw, base, diff */
+	int num_element;
+};
+#endif
+#define ABS(x)                 (((x) < 0) ? -(x) : (x))
+
+#define CY_MAX_CONFIG_BYTES    256
+#define CYTTSP5_TTHE_TUNER_GET_PANEL_DATA_FILE_NAME "get_panel_data"
+#define TTHE_TUNER_MAX_BUF     (CY_MAX_PRBUF_SIZE * 3)
+
+struct cyttsp5_device_access_data {
+	struct device *dev;
+	struct cyttsp5_sysinfo *si;
+	struct mutex sysfs_lock;
+	u8 status;
+	u16 response_length;
+	bool sysfs_nodes_created;
+	struct kobject mfg_test;
+	u8 panel_scan_data_id;
+	u8 get_idac_data_id;
+	u8 calibrate_sensing_mode;
+	u8 calibrate_initialize_baselines;
+	u8 baseline_sensing_mode;
+#ifdef TTHE_TUNER_SUPPORT
+	struct heatmap_param heatmap;
+	struct dentry *tthe_get_panel_data_debugfs;
+	struct mutex debugfs_lock;
+	u8 tthe_get_panel_data_buf[TTHE_TUNER_MAX_BUF];
+	u8 tthe_get_panel_data_is_open;
+#endif
+	struct dentry *cmcp_results_debugfs;
+
+	struct dentry *base_dentry;
+	struct dentry *mfg_test_dentry;
+	u8 ic_buf[CY_MAX_PRBUF_SIZE];
+	u8 response_buf[CY_MAX_PRBUF_SIZE];
+	struct mutex cmcp_threshold_lock;
+	u8 *cmcp_threshold_data;
+	int cmcp_threshold_size;
+	bool cmcp_threshold_loading;
+	struct work_struct cmcp_threshold_update;
+	struct completion builtin_cmcp_threshold_complete;
+	int builtin_cmcp_threshold_status;
+	bool is_manual_upgrade_enabled;
+	struct configuration *configs;
+	struct cmcp_data *cmcp_info;
+	struct result *result;
+	struct test_case_search *test_search_array;
+	struct test_case_field *test_field_array;
+	int cmcp_test_items;
+	int test_executed;
+	int cmcp_range_check;
+	int cmcp_force_calibrate;
+	int cmcp_test_in_progress;
+};
+
+struct cmcp_data {
+	struct gd_sensor *gd_sensor_col;
+	struct gd_sensor *gd_sensor_row;
+	int32_t *cm_data_panel;
+	int32_t *cp_tx_data_panel;
+	int32_t *cp_rx_data_panel;
+	int32_t *cp_tx_cal_data_panel;
+	int32_t *cp_rx_cal_data_panel;
+	int32_t cp_sensor_rx_delta;
+	int32_t cp_sensor_tx_delta;
+	int32_t cp_button_delta;
+	int32_t *cm_btn_data;
+	int32_t *cp_btn_data;
+	int32_t *cm_sensor_column_delta;
+	int32_t *cm_sensor_row_delta;
+	int32_t cp_btn_cal;
+	int32_t cm_btn_cal;
+	int32_t cp_button_ave;
+	int32_t cm_ave_data_panel;
+	int32_t cp_tx_ave_data_panel;
+	int32_t cp_rx_ave_data_panel;
+	int32_t cm_cal_data_panel;
+	int32_t cm_ave_data_btn;
+	int32_t cm_cal_data_btn;
+	int32_t cm_delta_data_btn;
+	int32_t cm_sensor_delta;
+
+	int32_t tx_num;
+	int32_t rx_num;
+	int32_t btn_num;
+};
+
+struct result {
+	int32_t sensor_assignment;
+	int32_t config_ver;
+	int32_t revision_ctrl;
+	int32_t device_id_high;
+	int32_t device_id_low;
+	bool cm_test_run;
+	bool cp_test_run;
+	/* Sensor Cm validation */
+	bool cm_test_pass;
+	bool cm_sensor_validation_pass;
+	bool cm_sensor_row_delta_pass;
+	bool cm_sensor_col_delta_pass;
+	bool cm_sensor_gd_row_pass;
+	bool cm_sensor_gd_col_pass;
+	bool cm_sensor_calibration_pass;
+	bool cm_sensor_delta_pass;
+	bool cm_button_validation_pass;
+	bool cm_button_delta_pass;
+
+	int32_t *cm_sensor_raw_data;
+	int32_t cm_sensor_calibration;
+	int32_t cm_sensor_delta;
+	int32_t *cm_button_raw_data;
+	int32_t cm_button_delta;
+
+	/* Sensor Cp validation */
+	bool cp_test_pass;
+	bool cp_sensor_delta_pass;
+	bool cp_sensor_rx_delta_pass;
+	bool cp_sensor_tx_delta_pass;
+	bool cp_sensor_average_pass;
+	bool cp_button_delta_pass;
+	bool cp_button_average_pass;
+	bool cp_rx_validation_pass;
+	bool cp_tx_validation_pass;
+	bool cp_button_validation_pass;
+
+	int32_t *cp_sensor_rx_raw_data;
+	int32_t *cp_sensor_tx_raw_data;
+	int32_t cp_sensor_rx_delta;
+	int32_t cp_sensor_tx_delta;
+	int32_t cp_sensor_rx_calibration;
+	int32_t cp_sensor_tx_calibration;
+	int32_t *cp_button_raw_data;
+	int32_t cp_button_delta;
+
+	/*other validation*/
+	bool short_test_pass;
+	bool test_summary;
+	uint8_t *cm_open_pwc;
+};
+
+static struct cyttsp5_core_commands *cmd;
+
+static struct cyttsp5_module device_access_module;
+
+static ssize_t cyttsp5_run_and_get_selftest_result_noprint(struct device *dev,
+		char *buf, size_t buf_len, u8 test_id, u16 read_length,
+		bool get_result_on_pass);
+
+static int _cyttsp5_calibrate_idacs_cmd(struct device *dev,
+		u8 sensing_mode, u8 *status);
+
+static inline struct cyttsp5_device_access_data *cyttsp5_get_device_access_data(
+		struct device *dev)
+{
+	return cyttsp5_get_module_data(dev, &device_access_module);
+}
+
+static ssize_t cyttsp5_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	u8 val;
+
+	mutex_lock(&dad->sysfs_lock);
+	val = dad->status;
+	mutex_unlock(&dad->sysfs_lock);
+
+	return scnprintf(buf, CY_MAX_PRBUF_SIZE, "%d\n", val);
+}
+
+static DEVICE_ATTR(status, 0400, cyttsp5_status_show, NULL);
+
+static ssize_t cyttsp5_response_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	int i;
+	ssize_t num_read;
+	int index;
+
+	mutex_lock(&dad->sysfs_lock);
+	index = scnprintf(buf, CY_MAX_PRBUF_SIZE, "Status %d\n", dad->status);
+	if (!dad->status)
+		goto error;
+
+	num_read = dad->response_length;
+
+	for (i = 0; i < num_read; i++)
+		index += scnprintf(buf + index, CY_MAX_PRBUF_SIZE - index,
+			"0x%02X\n", dad->response_buf[i]);
+
+	index += scnprintf(buf + index, CY_MAX_PRBUF_SIZE - index,
+			"(%zd bytes)\n", num_read);
+
+error:
+	mutex_unlock(&dad->sysfs_lock);
+	return index;
+}
+
+static DEVICE_ATTR(response, 0400, cyttsp5_response_show, NULL);
+
+/*
+ * Get user input from sysfs and parse it.
+ * Return the size of the parsed output buffer.
+ */
+static int cyttsp5_ic_parse_input(struct device *dev, const char *buf,
+		size_t buf_size, u8 *ic_buf, size_t ic_buf_size)
+{
+	const char *pbuf = buf;
+	unsigned long value;
+	char scan_buf[CYTTSP5_INPUT_ELEM_SZ];
+	u32 i = 0;
+	u32 j;
+	int last = 0;
+	int ret;
+
+	parade_debug(dev, DEBUG_LEVEL_1,
+		"%s: pbuf=%pK buf=%pK size=%zu %s=%zu buf=%s\n",
+		__func__, pbuf, buf, buf_size, "scan buf size",
+		CYTTSP5_INPUT_ELEM_SZ, buf);
+
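+	/* Tokens are separated by spaces or commas; each is parsed as hex */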
+	while (pbuf <= (buf + buf_size)) {
+		if (i >= CY_MAX_CONFIG_BYTES) {
+			dev_err(dev, "%s: %s size=%d max=%d\n", __func__,
+				"Max cmd size exceeded", i,
+				CY_MAX_CONFIG_BYTES);
+			return -EINVAL;
+		}
+		if (i >= ic_buf_size) {
+			dev_err(dev, "%s: %s size=%d buf_size=%zu\n", __func__,
+				"Buffer size exceeded", i, ic_buf_size);
+			return -EINVAL;
+		}
+		/* check bounds before dereferencing pbuf */
+		while ((pbuf < (buf + buf_size))
+				&& ((*pbuf == ' ') || (*pbuf == ','))) {
+			last = *pbuf;
+			pbuf++;
+		}
+
+		if (pbuf >= (buf + buf_size))
+			break;
+
+		memset(scan_buf, 0, CYTTSP5_INPUT_ELEM_SZ);
+		if ((last == ',') && (*pbuf == ',')) {
+			dev_err(dev, "%s: %s \",,\" not allowed.\n", __func__,
+				"Invalid data format.");
+			return -EINVAL;
+		}
+		for (j = 0; j < (CYTTSP5_INPUT_ELEM_SZ - 1)
+				&& (pbuf < (buf + buf_size))
+				&& (*pbuf != ' ')
+				&& (*pbuf != ','); j++) {
+			last = *pbuf;
+			scan_buf[j] = *pbuf++;
+		}
+
+		ret = kstrtoul(scan_buf, 16, &value);
+		if (ret < 0) {
+			dev_err(dev, "%s: %s '%s' %s%s i=%d r=%d\n", __func__,
+				"Invalid data format. ", scan_buf,
+				"Use \"0xHH,...,0xHH\"", " instead.",
+				i, ret);
+			return ret;
+		}
+
+		ic_buf[i] = value;
+		i++;
+	}
+
+	return i;
+}
+
+static ssize_t cyttsp5_command_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	ssize_t length;
+	int rc;
+
+	mutex_lock(&dad->sysfs_lock);
+	dad->status = 0;
+	dad->response_length = 0;
+	length = cyttsp5_ic_parse_input(dev, buf, size, dad->ic_buf,
+			CY_MAX_PRBUF_SIZE);
+	if (length <= 0) {
+		dev_err(dev, "%s: %s Group Data store\n", __func__,
+			"Malformed input for");
+		goto exit;
+	}
+
+	/* write ic_buf to log */
+	cyttsp5_pr_buf(dev, dad->ic_buf, length, "ic_buf");
+
+	pm_runtime_get_sync(dev);
+	rc = cmd->nonhid_cmd->user_cmd(dev, 1, CY_MAX_PRBUF_SIZE,
+			dad->response_buf, length, dad->ic_buf,
+			&dad->response_length);
+	pm_runtime_put(dev);
+	if (rc) {
+		dad->response_length = 0;
+		dev_err(dev, "%s: Failed to store command\n", __func__);
+	} else
+		dad->status = 1;
+
+exit:
+	mutex_unlock(&dad->sysfs_lock);
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: return size=%zu\n",
+		__func__, size);
+	return size;
+}
+
+static DEVICE_ATTR(command, 0200, NULL, cyttsp5_command_store);
+
+static int cmcp_check_config_fw_match(struct device *dev,
+	struct configuration *configuration)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	int32_t tx_num = dad->configs->tx_num;
+	int32_t rx_num = dad->configs->rx_num;
+	int32_t button_num = dad->configs->btn_num;
+	int ret = 0;
+
+	if (tx_num != dad->si->sensing_conf_data.tx_num) {
+		dev_err(dev, "%s: TX number mismatch!\n", __func__);
+		ret = -EINVAL;
+	}
+
+	if (rx_num != dad->si->sensing_conf_data.rx_num) {
+		dev_err(dev, "%s: RX number mismatch!\n", __func__);
+		ret = -EINVAL;
+	}
+
+	if (button_num != dad->si->num_btns) {
+		dev_err(dev, "%s: Button number mismatch!\n", __func__);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int validate_cm_test_results(struct device *dev,
+	struct configuration *configuration, struct cmcp_data *cmcp_info,
+	struct result *result, bool *pass, int test_item)
+{
+	int32_t tx_num = cmcp_info->tx_num;
+	int32_t rx_num = cmcp_info->rx_num;
+	int32_t button_num = cmcp_info->btn_num;
+	uint32_t sensor_num = tx_num * rx_num;
+	int32_t *cm_sensor_data = cmcp_info->cm_data_panel;
+	int32_t cm_button_delta;
+	int32_t cm_sensor_calibration;
+	int32_t *cm_button_data = cmcp_info->cm_btn_data;
+	struct gd_sensor *gd_sensor_col = cmcp_info->gd_sensor_col;
+	struct gd_sensor *gd_sensor_row = cmcp_info->gd_sensor_row;
+	int32_t *cm_sensor_column_delta = cmcp_info->cm_sensor_column_delta;
+	int32_t *cm_sensor_row_delta = cmcp_info->cm_sensor_row_delta;
+	int ret = 0;
+	int i, j;
+	int row, col;
+	int32_t cm_sensor_min, cm_sensor_max;
+	int len, percent, size;
+	int32_t cm_sensor_row_diff;
+	int32_t cm_sensor_col_diff;
+	int32_t cm_button_min, cm_button_max;
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: start\n", __func__);
+
+	if ((test_item & CM_PANEL) == CM_PANEL) {
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"Check each sensor Cm data for min/max values\n");
+
+		/* Check each sensor Cm data for min/max values */
+		result->cm_sensor_validation_pass = true;
+
+	for (i = 0; i < sensor_num; i++) {
+		row = i % rx_num;
+		col = i / rx_num;
+		len = (row * tx_num + col) * 2;
+		cm_sensor_min =
+			configuration->cm_min_max_table_sensor[len];
+		cm_sensor_max =
+			configuration->cm_min_max_table_sensor[len + 1];
+
+		if ((cm_sensor_data[i] < cm_sensor_min)
+			|| (cm_sensor_data[i] > cm_sensor_max)) {
+			dev_err(dev, "%s: Sensor[%d,%d]:%d (%d,%d)\n",
+				"Cm sensor min/max test",
+				row, col,
+				cm_sensor_data[i],
+				cm_sensor_min, cm_sensor_max);
+			result->cm_sensor_validation_pass = false;
+		}
+	}
+
+	/*check cm gradient column data*/
+	result->cm_sensor_gd_col_pass = true;
+	size = configuration->cm_max_table_gradient_cols_percent_size;
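+	/* gradient_val is scaled by 1000, so "10 * percent" means percent% */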
+	for (i = 0; i < size; i++) {
+		percent = configuration->cm_max_table_gradient_cols_percent[i];
+		if ((gd_sensor_col + i)->gradient_val > 10 * percent) {
+			dev_err(dev,
+				"%s: cm_max_table_gradient_cols_percent",
+				__func__);
+
+			dev_err(dev,
+				"[%d]:%d, gradient_val:%d\n",
+				i, percent, (gd_sensor_col + i)->gradient_val);
+			result->cm_sensor_gd_col_pass = false;
+		}
+	}
+
+	/*check cm gradient row data*/
+	result->cm_sensor_gd_row_pass = true;
+	size = configuration->cm_max_table_gradient_rows_percent_size;
+	for (j = 0; j < size; j++) {
+		percent = configuration->cm_max_table_gradient_rows_percent[j];
+		if ((gd_sensor_row + j)->gradient_val > 10 * percent) {
+			dev_err(dev,
+				"%s: cm_max_table_gradient_rows_percent",
+				__func__);
+			dev_err(dev,
+				"[%d]:%d, gradient_val:%d\n",
+				j, percent, (gd_sensor_row + j)->gradient_val);
+			result->cm_sensor_gd_row_pass = false;
+		}
+	}
+
+	result->cm_sensor_row_delta_pass = true;
+	result->cm_sensor_col_delta_pass = true;
+	result->cm_sensor_calibration_pass = true;
+	result->cm_sensor_delta_pass = true;
+
+	/*
+	 * Check each row's Cm data against its neighbor
+	 * for the difference limit
+	 */
+	for (i = 0; i < tx_num; i++) {
+		for (j = 1; j < rx_num; j++) {
+			cm_sensor_row_diff = ABS(cm_sensor_data[i * rx_num + j]
+				- cm_sensor_data[i * rx_num + j - 1]);
+			cm_sensor_row_delta[i * rx_num + j - 1] =
+				cm_sensor_row_diff;
+			if (cm_sensor_row_diff
+				> configuration->cm_range_limit_row) {
+				dev_err(dev,
+					"%s: Sensor[%d,%d]:%d (%d)\n",
+					"Cm sensor row range limit test",
+					j, i,
+					cm_sensor_row_diff,
+					configuration->cm_range_limit_row);
+				result->cm_sensor_row_delta_pass = false;
+			}
+		}
+	}
+
+	/*
+	 * Check each column's Cm data against its neighbor
+	 * for the difference limit
+	 */
+	for (i = 1; i < tx_num; i++) {
+		for (j = 0; j < rx_num; j++) {
+			cm_sensor_col_diff =
+				ABS((int)cm_sensor_data[i * rx_num + j]
+				- (int)cm_sensor_data[(i - 1) * rx_num + j]);
+			cm_sensor_column_delta[(i - 1) * rx_num + j] =
+				cm_sensor_col_diff;
+			if (cm_sensor_col_diff >
+				configuration->cm_range_limit_col) {
+				dev_err(dev,
+					"%s: Sensor[%d,%d]:%d (%d)\n",
+					"Cm sensor column range limit test",
+					j, i,
+					cm_sensor_col_diff,
+					configuration->cm_range_limit_col);
+				result->cm_sensor_col_delta_pass = false;
+			}
+		}
+	}
+
+	/* Check sensor calculated Cm for min/max values */
+	cm_sensor_calibration = cmcp_info->cm_cal_data_panel;
+	if (cm_sensor_calibration < configuration->cm_min_limit_cal
+		|| cm_sensor_calibration > configuration->cm_max_limit_cal) {
+		dev_err(dev, "%s: Cm_cal:%d (%d,%d)\n",
+			"Cm sensor Cm_cal min/max test",
+			cm_sensor_calibration,
+			configuration->cm_min_limit_cal,
+			configuration->cm_max_limit_cal);
+		result->cm_sensor_calibration_pass = false;
+	}
+
+	/* Check sensor Cm delta for range limit */
+	percent = configuration->cm_max_delta_sensor_percent;
+	if (cmcp_info->cm_sensor_delta > 10 * percent) {
+		dev_err(dev,
+			"%s: Cm_sensor_delta:%d (%d)\n",
+			"Cm sensor delta range limit test",
+			cmcp_info->cm_sensor_delta,
+			percent);
+		result->cm_sensor_delta_pass = false;
+	}
+
+	result->cm_test_pass = result->cm_sensor_gd_col_pass
+			&& result->cm_sensor_gd_row_pass
+			&& result->cm_sensor_validation_pass
+			&& result->cm_sensor_row_delta_pass
+			&& result->cm_sensor_col_delta_pass
+			&& result->cm_sensor_calibration_pass
+			&& result->cm_sensor_delta_pass;
+	}
+
+	if (((test_item & CM_BTN) == CM_BTN) && (cmcp_info->btn_num)) {
+		/* Check each button Cm data for min/max values */
+		result->cm_button_validation_pass = true;
+		for (i = 0; i < button_num; i++) {
+			len = i * 2;
+			cm_button_min =
+				configuration->cm_min_max_table_button[len];
+			cm_button_max =
+				configuration->cm_min_max_table_button[len + 1];
+			if ((cm_button_data[i] <= cm_button_min)
+				|| (cm_button_data[i] >= cm_button_max)) {
+				dev_err(dev,
+					"%s: Button[%d]:%d (%d,%d)\n",
+					"Cm button min/max test",
+					i,
+					cm_button_data[i],
+					cm_button_min, cm_button_max);
+				result->cm_button_validation_pass = false;
+			}
+		}
+
+		/* Check button Cm delta for range limit */
+		result->cm_button_delta_pass = true;
+
+		cm_button_delta = ABS((cmcp_info->cm_ave_data_btn -
+			cmcp_info->cm_cal_data_btn) * 100 /
+			cmcp_info->cm_ave_data_btn);
+		if (cm_button_delta >
+			configuration->cm_max_delta_button_percent) {
+			dev_err(dev,
+				"%s: Cm_button_delta:%d (%d)\n",
+				"Cm button delta range limit test",
+				cm_button_delta,
+				configuration->cm_max_delta_button_percent);
+			result->cm_button_delta_pass = false;
+		}
+
+		result->cm_test_pass = result->cm_test_pass
+				&& result->cm_button_validation_pass
+				&& result->cm_button_delta_pass;
+	}
+
+	if (pass)
+		*pass = result->cm_test_pass;
+
+	return ret;
+}
+static int validate_cp_test_results(struct device *dev,
+		struct configuration *configuration,
+		struct cmcp_data *cmcp_info,
+		struct result *result, bool *pass, int test_item)
+{
+	int i = 0;
+	uint32_t configuration_rx_num;
+	uint32_t configuration_tx_num;
+	int32_t *cp_sensor_tx_data = cmcp_info->cp_tx_data_panel;
+	int32_t *cp_sensor_rx_data = cmcp_info->cp_rx_data_panel;
+	int32_t cp_button_delta;
+	int32_t cp_button_average;
+	int32_t cp_sensor_tx_delta;
+	int32_t cp_sensor_rx_delta;
+	int32_t cp_rx_min, cp_rx_max;
+	int32_t cp_tx_min, cp_tx_max;
+	int32_t cp_button_min, cp_button_max;
+	int32_t percent;
+	int len;
+
+	result->cp_test_pass = true;
+	configuration_rx_num = configuration->cp_min_max_table_rx_size / 2;
+	configuration_tx_num = configuration->cp_min_max_table_tx_size / 2;
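+	/* Each table entry is a (min, max) pair, so size / 2 is the count */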
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s start\n", __func__);
+
+	if ((test_item & CP_PANEL) == CP_PANEL) {
+
+		/* Check Sensor Cp delta for range limit */
+		result->cp_sensor_delta_pass = true;
+		/*check cp_sensor_tx_delta */
+		for (i = 0; i < configuration_tx_num; i++) {
+			cp_sensor_tx_delta =
+				ABS((cmcp_info->cp_tx_cal_data_panel[i] -
+				cmcp_info->cp_tx_data_panel[i]) * 100 /
+				cmcp_info->cp_tx_data_panel[i]);
+
+			percent = configuration->cp_max_delta_sensor_tx_percent;
+			if (cp_sensor_tx_delta > percent) {
+				dev_err(dev,
+					"%s: Cp_sensor_tx_delta:%d (%d)\n",
+					"Cp sensor delta range limit test",
+					cp_sensor_tx_delta,
+					percent);
+				result->cp_sensor_delta_pass = false;
+			}
+		}
+
+		/*check cp_sensor_rx_delta */
+		for (i = 0; i < configuration_rx_num; i++) {
+			cp_sensor_rx_delta =
+				ABS((cmcp_info->cp_rx_cal_data_panel[i] -
+				cmcp_info->cp_rx_data_panel[i]) * 100 /
+				cmcp_info->cp_rx_data_panel[i]);
+			percent = configuration->cp_max_delta_sensor_rx_percent;
+			if (cp_sensor_rx_delta > percent) {
+				dev_err(dev,
+					"%s: Cp_sensor_rx_delta:%d(%d)\n",
+					"Cp sensor delta range limit test",
+					cp_sensor_rx_delta,
+					percent);
+				result->cp_sensor_delta_pass = false;
+			}
+		}
+
+		/* Check sensor Cp rx for min/max values */
+		result->cp_rx_validation_pass = true;
+		for (i = 0; i < configuration_rx_num; i++) {
+			cp_rx_min =
+				configuration->cp_min_max_table_rx[i * 2];
+			cp_rx_max =
+				configuration->cp_min_max_table_rx[i * 2 + 1];
+			if ((cp_sensor_rx_data[i] <= cp_rx_min)
+				|| (cp_sensor_rx_data[i] >= cp_rx_max)) {
+				dev_err(dev,
+					"%s: Cp Rx[%d]:%d (%d,%d)\n",
+					"Cp Rx min/max test",
+					i,
+					(int)cp_sensor_rx_data[i],
+					cp_rx_min, cp_rx_max);
+				result->cp_rx_validation_pass = false;
+			}
+		}
+
+		/* Check sensor Cp tx for min/max values */
+		result->cp_tx_validation_pass = true;
+		for (i = 0; i < configuration_tx_num; i++) {
+			cp_tx_min =
+				configuration->cp_min_max_table_tx[i * 2];
+			cp_tx_max =
+				configuration->cp_min_max_table_tx[i * 2 + 1];
+			if ((cp_sensor_tx_data[i] <= cp_tx_min)
+				|| (cp_sensor_tx_data[i] >= cp_tx_max)) {
+				dev_err(dev,
+					"%s: Cp Tx[%d]:%d(%d,%d)\n",
+					"Cp Tx min/max test",
+					i,
+					cp_sensor_tx_data[i],
+					cp_tx_min, cp_tx_max);
+				result->cp_tx_validation_pass = false;
+			}
+		}
+
+		result->cp_test_pass = result->cp_test_pass
+				&& result->cp_sensor_delta_pass
+				&& result->cp_rx_validation_pass
+				&& result->cp_tx_validation_pass;
+	}
+
+	if (((test_item & CP_BTN) == CP_BTN) && (cmcp_info->btn_num)) {
+		result->cp_button_delta_pass = true;
+
+		/* Check button Cp delta for range limit */
+		cp_button_delta = ABS((cmcp_info->cp_btn_cal
+			- cmcp_info->cp_button_ave) * 100 /
+			cmcp_info->cp_button_ave);
+		percent = configuration->cp_max_delta_button_percent;
+		if (cp_button_delta > percent) {
+			dev_err(dev,
+				"%s: Cp_button_delta:%d (%d)\n",
+				"Cp button delta range limit test",
+				cp_button_delta,
+				percent);
+			result->cp_button_delta_pass = false;
+		}
+
+		/* Check button Cp average for min/max values */
+		result->cp_button_average_pass = true;
+		cp_button_average = cmcp_info->cp_button_ave;
+		if (cp_button_average < configuration->min_button
+			|| cp_button_average > configuration->max_button) {
+			dev_err(dev,
+				"%s: Button Cp average fails min/max test\n",
+				__func__);
+			dev_err(dev,
+				"%s: Cp_button_average:%d (%d,%d)\n",
+				"Cp button average min/max test",
+				cp_button_average,
+				configuration->min_button,
+				configuration->max_button);
+			result->cp_button_average_pass = false;
+		}
+
+		/* Check each button Cp data for min/max values */
+		result->cp_button_validation_pass = true;
+		for (i = 0; i < cmcp_info->btn_num; i++) {
+			len = i * 2;
+			cp_button_min =
+				configuration->cp_min_max_table_button[len];
+			cp_button_max =
+				configuration->cp_min_max_table_button[len + 1];
+			if ((cmcp_info->cp_btn_data[i] <= cp_button_min) ||
+				(cmcp_info->cp_btn_data[i] >= cp_button_max)) {
+				dev_err(dev,
+					"%s: Button[%d]:%d (%d,%d)\n",
+					"Cp button min/max test",
+					i,
+					cmcp_info->cp_btn_data[i],
+					cp_button_min, cp_button_max);
+				result->cp_button_validation_pass = false;
+			}
+		}
+
+		result->cp_test_pass = result->cp_test_pass
+				&& result->cp_button_delta_pass
+				&& result->cp_button_average_pass
+				&& result->cp_button_validation_pass;
+	}
+
+	if (pass)
+		*pass = result->cp_test_pass;
+
+	return 0;
+}
+
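+/*
+ * Gradient per element: (cm_max - cm_min + |cm_ave - cm_ave_neighbor|)
+ * * 1000 / cm_ave; edge elements drop the neighbor term when the row
+ * edge is excluded
+ */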
+static void calculate_gradient_row(struct gd_sensor *gd_sensor_row_head,
+		 uint16_t row_num, int exclude_row_edge, int exclude_col_edge)
+{
+	int i = 0;
+	uint16_t cm_min_cur = 0;
+	uint16_t cm_max_cur = 0;
+	uint16_t cm_ave_cur = 0;
+	uint16_t cm_ave_next = 0;
+	uint16_t cm_ave_prev = 0;
+	struct gd_sensor *p = gd_sensor_row_head;
+	struct gd_sensor *cur, *next;
+
+	if (exclude_row_edge) {
+		for (i = 0; i < row_num; i++) {
+			cur = p + i;
+			next = cur + 1;
+			if (!exclude_col_edge) {
+				cm_ave_cur = cur->cm_ave;
+				cm_min_cur = cur->cm_min;
+				cm_max_cur = cur->cm_max;
+				if (i < (row_num - 1))
+					cm_ave_next = next->cm_ave;
+				if (i > 0)
+					cm_ave_prev = (cur - 1)->cm_ave;
+			} else {
+				cm_ave_cur = cur->cm_ave_exclude_edge;
+				cm_min_cur = cur->cm_min_exclude_edge;
+				cm_max_cur = cur->cm_max_exclude_edge;
+				if (i < (row_num - 1))
+					cm_ave_next = next->cm_ave_exclude_edge;
+				if (i > 0)
+					cm_ave_prev =
+						(cur - 1)->cm_ave_exclude_edge;
+			}
+
+			if (cm_ave_cur == 0)
+				cm_ave_cur = 1;
+
+			/* multiply by 1000 to increase accuracy */
+			if ((i == 0) || (i == (row_num - 1)))
+				cur->gradient_val = (cm_max_cur - cm_min_cur)
+					* 1000 / cm_ave_cur;
+			else if (i == 1)
+				cur->gradient_val = (cm_max_cur - cm_min_cur
+					+ ABS(cm_ave_cur - cm_ave_next))
+					* 1000 / cm_ave_cur;
+			else
+				cur->gradient_val = (cm_max_cur - cm_min_cur
+					+ ABS(cm_ave_cur - cm_ave_prev))
+					* 1000 / cm_ave_cur;
+
+		}
+	} else {
+		for (i = 0; i < row_num; i++) {
+			cur = p + i;
+			next = cur + 1;
+			if (!exclude_col_edge) {
+				cm_ave_cur = cur->cm_ave;
+				cm_min_cur = cur->cm_min;
+				cm_max_cur = cur->cm_max;
+				if (i < (row_num - 1))
+					cm_ave_next = next->cm_ave;
+				if (i > 0)
+					cm_ave_prev = (cur - 1)->cm_ave;
+			} else {
+				cm_ave_cur = cur->cm_ave_exclude_edge;
+				cm_min_cur = cur->cm_min_exclude_edge;
+				cm_max_cur = cur->cm_max_exclude_edge;
+				if (i < (row_num - 1))
+					cm_ave_next =
+						next->cm_ave_exclude_edge;
+				if (i > 0)
+					cm_ave_prev =
+						(cur - 1)->cm_ave_exclude_edge;
+			}
+
+			if (cm_ave_cur == 0)
+				cm_ave_cur = 1;
+			/* multiply by 1000 to increase accuracy */
+			if (i <= 1)
+				cur->gradient_val = (cm_max_cur - cm_min_cur
+					+ ABS(cm_ave_cur - cm_ave_next))
+					* 1000 / cm_ave_cur;
+			else
+				cur->gradient_val = (cm_max_cur - cm_min_cur
+					+ ABS(cm_ave_cur - cm_ave_prev))
+					* 1000 / cm_ave_cur;
+		}
+	}
+}
+
+static void calculate_gradient_col(struct gd_sensor *gd_sensor_row_head,
+	uint16_t col_num, int exclude_row_edge, int exclude_col_edge)
+{
+	int i = 0;
+	int32_t cm_min_cur = 0;
+	int32_t cm_max_cur = 0;
+	int32_t cm_ave_cur = 0;
+	int32_t cm_ave_next = 0;
+	int32_t cm_ave_prev = 0;
+	struct gd_sensor *p = gd_sensor_row_head;
+	struct gd_sensor *cur, *next;
+
+	if (!exclude_col_edge) {
+		for (i = 0; i < col_num; i++) {
+			cur = p + i;
+			next = cur + 1;
+			if (!exclude_row_edge) {
+				cm_ave_cur = cur->cm_ave;
+				cm_min_cur = cur->cm_min;
+				cm_max_cur = cur->cm_max;
+				if (i < (col_num - 1))
+					cm_ave_next = next->cm_ave;
+				if (i > 0)
+					cm_ave_prev = (cur - 1)->cm_ave;
+			} else {
+				cm_ave_cur = cur->cm_ave_exclude_edge;
+				cm_min_cur = cur->cm_min_exclude_edge;
+				cm_max_cur = cur->cm_max_exclude_edge;
+				if (i < (col_num - 1))
+					cm_ave_next = next->cm_ave_exclude_edge;
+				if (i > 0)
+					cm_ave_prev =
+						(cur - 1)->cm_ave_exclude_edge;
+			}
+			if (cm_ave_cur == 0)
+				cm_ave_cur = 1;
+			/* multiply by 1000 to increase accuracy */
+			if (i <= 1)
+				cur->gradient_val = (cm_max_cur - cm_min_cur
+					+ ABS(cm_ave_cur - cm_ave_next))
+					* 1000 / cm_ave_cur;
+			else
+				cur->gradient_val = (cm_max_cur - cm_min_cur
+					+ ABS(cm_ave_cur - cm_ave_prev))
+					* 1000 / cm_ave_cur;
+		}
+	} else {
+		for (i = 0; i < col_num; i++) {
+			cur = p + i;
+			next = cur + 1;
+			if (!exclude_row_edge) {
+				cm_ave_cur = cur->cm_ave;
+				cm_min_cur = cur->cm_min;
+				cm_max_cur = cur->cm_max;
+				if (i < (col_num - 1))
+					cm_ave_next = next->cm_ave;
+				if (i > 0)
+					cm_ave_prev = (cur - 1)->cm_ave;
+			} else {
+				cm_ave_cur = cur->cm_ave_exclude_edge;
+				cm_min_cur = cur->cm_min_exclude_edge;
+				cm_max_cur = cur->cm_max_exclude_edge;
+				if (i < (col_num - 1))
+					cm_ave_next = next->cm_ave_exclude_edge;
+				if (i > 0)
+					cm_ave_prev =
+						(cur - 1)->cm_ave_exclude_edge;
+			}
+
+			if (cm_ave_cur == 0)
+				cm_ave_cur = 1;
+			/* multiply by 1000 to increase accuracy */
+			if ((i == 0) || (i == (col_num - 1)))
+				cur->gradient_val = (cm_max_cur - cm_min_cur)
+						* 1000 / cm_ave_cur;
+			else if (i == 1)
+				cur->gradient_val = (cm_max_cur - cm_min_cur +
+						ABS(cm_ave_cur - cm_ave_next))
+						* 1000 / cm_ave_cur;
+			else
+				cur->gradient_val = (cm_max_cur - cm_min_cur +
+						ABS(cm_ave_cur - cm_ave_prev))
+						* 1000 / cm_ave_cur;
+		}
+	}
+}
+
+static void fill_gd_sensor_table(struct gd_sensor *head, int32_t index,
+	int32_t cm_max, int32_t cm_min,	int32_t cm_ave,
+	int32_t cm_max_exclude_edge, int32_t cm_min_exclude_edge,
+	int32_t cm_ave_exclude_edge)
+{
+	(head + index)->cm_max = cm_max;
+	(head + index)->cm_min = cm_min;
+	(head + index)->cm_ave = cm_ave;
+	(head + index)->cm_ave_exclude_edge = cm_ave_exclude_edge;
+	(head + index)->cm_max_exclude_edge = cm_max_exclude_edge;
+	(head + index)->cm_min_exclude_edge = cm_min_exclude_edge;
+}
+
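+/*
+ * Compute min/max/average Cm per column and per row (with and without
+ * the edge sensors), then derive the gradient values from them
+ */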
+static void calculate_gd_info(struct gd_sensor *gd_sensor_col,
+	struct gd_sensor *gd_sensor_row, int tx_num, int rx_num,
+	int32_t *cm_sensor_data, int cm_excluding_row_edge,
+	int cm_excluding_col_edge)
+{
+	int32_t cm_max;
+	int32_t cm_min;
+	int32_t cm_ave;
+	int32_t cm_max_exclude_edge;
+	int32_t cm_min_exclude_edge;
+	int32_t cm_ave_exclude_edge;
+	int32_t cm_data;
+	int i;
+	int j;
+
+	/*calculate all the gradient related info for column*/
+	for (i = 0; i < tx_num; i++) {
+		/*re-initialize for a new col*/
+		cm_max = cm_sensor_data[i * rx_num];
+		cm_min = cm_max;
+		cm_ave = 0;
+		cm_max_exclude_edge = cm_sensor_data[i * rx_num + 1];
+		cm_min_exclude_edge = cm_max_exclude_edge;
+		cm_ave_exclude_edge = 0;
+
+		for (j = 0; j < rx_num; j++) {
+			cm_data = cm_sensor_data[i * rx_num + j];
+			if (cm_data > cm_max)
+				cm_max = cm_data;
+			if (cm_data < cm_min)
+				cm_min = cm_data;
+			cm_ave += cm_data;
+			/*calculate exclude edge data*/
+			if ((j > 0) && (j < (rx_num - 1))) {
+				if (cm_data > cm_max_exclude_edge)
+					cm_max_exclude_edge = cm_data;
+				if (cm_data < cm_min_exclude_edge)
+					cm_min_exclude_edge = cm_data;
+				cm_ave_exclude_edge += cm_data;
+			}
+		}
+		cm_ave /= rx_num;
+		cm_ave_exclude_edge /= (rx_num - 2);
+		fill_gd_sensor_table(gd_sensor_col, i, cm_max, cm_min, cm_ave,
+				cm_max_exclude_edge, cm_min_exclude_edge,
+				cm_ave_exclude_edge);
+	}
+
+	calculate_gradient_col(gd_sensor_col, tx_num, cm_excluding_row_edge,
+		 cm_excluding_col_edge);
+
+	/*calculate all the gradient related info for row*/
+	for (j = 0; j < rx_num; j++) {
+		/*re-initialize for a new row*/
+		cm_max = cm_sensor_data[j];
+		cm_min = cm_max;
+		cm_ave = 0;
+		cm_max_exclude_edge = cm_sensor_data[rx_num + j];
+		cm_min_exclude_edge = cm_max_exclude_edge;
+		cm_ave_exclude_edge = 0;
+		for (i = 0; i < tx_num; i++) {
+			cm_data = cm_sensor_data[i * rx_num + j];
+			if (cm_data > cm_max)
+				cm_max = cm_data;
+			if (cm_data < cm_min)
+				cm_min = cm_data;
+			cm_ave += cm_data;
+			/*calculate exclude edge data*/
+			if ((i > 0) && (i < (tx_num - 1))) {
+				if (cm_data > cm_max_exclude_edge)
+					cm_max_exclude_edge = cm_data;
+				if (cm_data < cm_min_exclude_edge)
+					cm_min_exclude_edge = cm_data;
+				cm_ave_exclude_edge += cm_data;
+			}
+		}
+		cm_ave /= tx_num;
+		cm_ave_exclude_edge /= (tx_num - 2);
+		fill_gd_sensor_table(gd_sensor_row, j, cm_max, cm_min, cm_ave,
+				cm_max_exclude_edge, cm_min_exclude_edge,
+				cm_ave_exclude_edge);
+	}
+	calculate_gradient_row(gd_sensor_row, rx_num, cm_excluding_row_edge,
+		 cm_excluding_col_edge);
+}
+
+static int cyttsp5_get_cmcp_info(struct cyttsp5_device_access_data *dad,
+	struct cmcp_data *cmcp_info)
+{
+	struct device *dev;
+	int32_t *cm_data_panel = cmcp_info->cm_data_panel;
+	int32_t *cp_tx_data_panel = cmcp_info->cp_tx_data_panel;
+	int32_t *cp_rx_data_panel = cmcp_info->cp_rx_data_panel;
+	int32_t *cp_tx_cal_data_panel = cmcp_info->cp_tx_cal_data_panel;
+	int32_t *cp_rx_cal_data_panel = cmcp_info->cp_rx_cal_data_panel;
+	int32_t *cm_btn_data = cmcp_info->cm_btn_data;
+	int32_t *cp_btn_data = cmcp_info->cp_btn_data;
+	struct gd_sensor *gd_sensor_col = cmcp_info->gd_sensor_col;
+	struct gd_sensor *gd_sensor_row = cmcp_info->gd_sensor_row;
+	struct result *result = dad->result;
+	int32_t cp_btn_cal = 0;
+	int32_t cm_btn_cal = 0;
+	int32_t cp_btn_ave = 0;
+	int32_t cm_ave_data_panel = 0;
+	int32_t cm_ave_data_btn = 0;
+	int32_t cm_delta_data_btn = 0;
+	int32_t cp_tx_ave_data_panel = 0;
+	int32_t cp_rx_ave_data_panel = 0;
+	u8 tmp_buf[3];
+	int tx_num;
+	int rx_num;
+	int btn_num;
+	int rc = 0;
+	int i;
+	int len;
+
+	dev = dad->dev;
+	cmcp_info->tx_num = dad->si->sensing_conf_data.tx_num;
+	cmcp_info->rx_num = dad->si->sensing_conf_data.rx_num;
+	cmcp_info->btn_num = dad->si->num_btns;
+
+	tx_num = cmcp_info->tx_num;
+	rx_num = cmcp_info->rx_num;
+	btn_num = cmcp_info->btn_num;
+	parade_debug(dev, DEBUG_LEVEL_2, "%s tx_num=%d\n", __func__, tx_num);
+	parade_debug(dev, DEBUG_LEVEL_2, "%s rx_num=%d\n", __func__, rx_num);
+	parade_debug(dev, DEBUG_LEVEL_2, "%s btn_num=%d\n", __func__, btn_num);
+
+	/*short test*/
+	result->short_test_pass = true;
+	rc = cyttsp5_run_and_get_selftest_result_noprint(
+		dev, tmp_buf, ARRAY_SIZE(tmp_buf),
+		CY_ST_ID_AUTOSHORTS, PIP_CMD_MAX_LENGTH, false);
+	if (rc) {
+		dev_err(dev, "short test not supported\n");
+		goto exit;
+	}
+	if (dad->ic_buf[1] != 0)
+		result->short_test_pass = false;
+
+	/*Get cm_panel data*/
+	rc = cyttsp5_run_and_get_selftest_result_noprint(
+		dev, tmp_buf, ARRAY_SIZE(tmp_buf),
+		CY_ST_ID_CM_PANEL, PIP_CMD_MAX_LENGTH, true);
+	if (rc) {
+		dev_err(dev, "Get CM Panel not supported\n");
+		goto exit;
+	}
+	if (cm_data_panel != NULL) {
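+		/* Raw values are little-endian 16-bit words, scaled by 10 */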
+		for (i = 0; i < tx_num * rx_num; i++) {
+			len = CM_PANEL_DATA_OFFSET + i * 2;
+			cm_data_panel[i] = 10 * (dad->ic_buf[len]
+					+ 256 * dad->ic_buf[len + 1]);
+			parade_debug(dev, DEBUG_LEVEL_2,
+				"cm_data_panel[%d]=%d\n",
+				i, cm_data_panel[i]);
+			cm_ave_data_panel += cm_data_panel[i];
+		}
+		cm_ave_data_panel /= (tx_num * rx_num);
+		cmcp_info->cm_ave_data_panel = cm_ave_data_panel;
+		cmcp_info->cm_cal_data_panel =
+			10 * (dad->ic_buf[CM_PANEL_DATA_OFFSET + i * 2]
+			+ 256 * dad->ic_buf[CM_PANEL_DATA_OFFSET + i * 2 + 1]);
+		/* multiply by 1000 to increase accuracy */
+		cmcp_info->cm_sensor_delta = ABS((cmcp_info->cm_ave_data_panel
+				- cmcp_info->cm_cal_data_panel) * 1000 /
+				cmcp_info->cm_ave_data_panel);
+
+		/*calculate gradient panel sensor column/row here*/
+		calculate_gd_info(gd_sensor_col, gd_sensor_row, tx_num, rx_num,
+			cm_data_panel, 1, 1);
+	}
+
+
+	for (i = 0; i < tx_num; i++)
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"i=%d max=%d,min=%d,ave=%d, gradient=%d\n",
+			i, gd_sensor_col[i].cm_max, gd_sensor_col[i].cm_min,
+			gd_sensor_col[i].cm_ave, gd_sensor_col[i].gradient_val);
+
+	for (i = 0; i < rx_num; i++)
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"i=%d max=%d,min=%d,ave=%d, gradient=%d\n",
+			i, gd_sensor_row[i].cm_max, gd_sensor_row[i].cm_min,
+			gd_sensor_row[i].cm_ave, gd_sensor_row[i].gradient_val);
+
+	/*Get cp data*/
+	rc = cyttsp5_run_and_get_selftest_result_noprint(
+		dev, tmp_buf, ARRAY_SIZE(tmp_buf),
+		CY_ST_ID_CP_PANEL, PIP_CMD_MAX_LENGTH, true);
+	if (rc) {
+		dev_err(dev, "Get CP Panel not supported\n");
+		goto exit;
+	}
+	/*Get cp_tx_data_panel*/
+	if (cp_tx_data_panel != NULL) {
+		for (i = 0; i < tx_num; i++) {
+			len = CP_PANEL_DATA_OFFSET + i * 2;
+			cp_tx_data_panel[i] = 10 * (dad->ic_buf[len]
+					+ 256 * dad->ic_buf[len + 1]);
+			parade_debug(dev, DEBUG_LEVEL_2,
+				"cp_tx_data_panel[%d]=%d\n",
+				i, cp_tx_data_panel[i]);
+			cp_tx_ave_data_panel += cp_tx_data_panel[i];
+		}
+		cp_tx_ave_data_panel /= tx_num;
+		cmcp_info->cp_tx_ave_data_panel = cp_tx_ave_data_panel;
+	}
+
+	/*Get cp_tx_cal_data_panel*/
+	if (cp_tx_cal_data_panel != NULL) {
+		for (i = 0; i < tx_num; i++) {
+			len = CP_PANEL_DATA_OFFSET + tx_num * 2 + i * 2;
+			cp_tx_cal_data_panel[i] = 10 * (dad->ic_buf[len]
+					+ 256 * dad->ic_buf[len + 1]);
+			parade_debug(dev, DEBUG_LEVEL_2,
+				" cp_tx_cal_data_panel[%d]=%d\n",
+				i, cp_tx_cal_data_panel[i]);
+		}
+
+		/* get cp_sensor_tx_delta, using the first sensor cal value */
+		/* temporarily multiply by 1000 to increase accuracy */
+		cmcp_info->cp_sensor_tx_delta = ABS((cp_tx_cal_data_panel[0]
+			- cp_tx_ave_data_panel) * 1000 / cp_tx_ave_data_panel);
+	}
+
+	/*Get cp_rx_data_panel*/
+	if (cp_rx_data_panel != NULL) {
+		for (i = 0; i < rx_num; i++) {
+			len = CP_PANEL_DATA_OFFSET + tx_num * 4 + i * 2;
+			cp_rx_data_panel[i] = 10 * (dad->ic_buf[len] +
+					256 * dad->ic_buf[len + 1]);
+			parade_debug(dev, DEBUG_LEVEL_2,
+				"cp_rx_data_panel[%d]=%d\n", i,
+				cp_rx_data_panel[i]);
+			cp_rx_ave_data_panel += cp_rx_data_panel[i];
+		}
+		cp_rx_ave_data_panel /= rx_num;
+		cmcp_info->cp_rx_ave_data_panel = cp_rx_ave_data_panel;
+	}
+
+	/*Get cp_rx_cal_data_panel*/
+	if (cp_rx_cal_data_panel != NULL) {
+		for (i = 0; i < rx_num; i++) {
+			len = CP_PANEL_DATA_OFFSET + tx_num * 4
+				+ rx_num * 2 + i * 2;
+			cp_rx_cal_data_panel[i] = 10 * (dad->ic_buf[len]
+				+ 256 * dad->ic_buf[len + 1]);
+			parade_debug(dev, DEBUG_LEVEL_2,
+				"cp_rx_cal_data_panel[%d]=%d\n", i,
+				cp_rx_cal_data_panel[i]);
+		}
+
+		/* get cp_sensor_rx_delta, using the first sensor cal value */
+		/* temporarily multiply by 1000 to increase accuracy */
+		cmcp_info->cp_sensor_rx_delta = ABS((cp_rx_cal_data_panel[0]
+			- cp_rx_ave_data_panel) * 1000 / cp_rx_ave_data_panel);
+
+	}
+
+	if (btn_num == 0)
+		goto skip_button_test;
+
+	/*get cm btn data*/
+	rc = cyttsp5_run_and_get_selftest_result_noprint(
+		dev, tmp_buf, ARRAY_SIZE(tmp_buf),
+		CY_ST_ID_CM_BUTTON, PIP_CMD_MAX_LENGTH, true);
+	if (rc) {
+		dev_err(dev, "Get CM BTN not supported\n");
+		goto exit;
+	}
+	if (cm_btn_data != NULL) {
+		for (i = 0; i < btn_num; i++) {
+			len = CM_BTN_DATA_OFFSET + i * 2;
+			cm_btn_data[i] = 10 * (dad->ic_buf[len]
+					+ 256 * dad->ic_buf[len + 1]);
+			parade_debug(dev, DEBUG_LEVEL_2,
+				" cm_btn_data[%d]=%d\n",
+				i, cm_btn_data[i]);
+			cm_ave_data_btn += cm_btn_data[i];
+		}
+		cm_ave_data_btn /= btn_num;
+		cm_btn_cal = 10 * (dad->ic_buf[CM_BTN_DATA_OFFSET + i * 2]
+			 + 256 * dad->ic_buf[CM_BTN_DATA_OFFSET + i * 2 + 1]);
+		/* multiply by 1000 to increase accuracy */
+		cm_delta_data_btn = ABS((cm_ave_data_btn - cm_btn_cal)
+			* 1000 / cm_ave_data_btn);
+		parade_debug(dev, DEBUG_LEVEL_2, " cm_btn_cal=%d\n",
+			cm_btn_cal);
+
+		cmcp_info->cm_ave_data_btn = cm_ave_data_btn;
+		cmcp_info->cm_cal_data_btn = cm_btn_cal;
+		cmcp_info->cm_delta_data_btn = cm_delta_data_btn;
+	}
+
+	/*get cp btn data*/
+	rc = cyttsp5_run_and_get_selftest_result_noprint(
+		dev, tmp_buf, ARRAY_SIZE(tmp_buf),
+		CY_ST_ID_CP_BUTTON, PIP_CMD_MAX_LENGTH, true);
+	if (rc) {
+		dev_err(dev, "Get CP BTN not supported\n");
+		goto exit;
+	}
+
+	if (cp_btn_data != NULL)
+		for (i = 0; i < btn_num; i++) {
+			len = CP_BTN_DATA_OFFSET + i * 2;
+			cp_btn_data[i] = 10 * (dad->ic_buf[len]
+				+ 256 * dad->ic_buf[len + 1]);
+			cp_btn_ave += cp_btn_data[i];
+			parade_debug(dev, DEBUG_LEVEL_2,
+				"cp_btn_data[%d]=%d\n",
+				i, cp_btn_data[i]);
+		}
+
+	cp_btn_ave /= btn_num;
+	cp_btn_cal = 10 * (dad->ic_buf[CP_BTN_DATA_OFFSET + i * 2]
+		 + 256 * dad->ic_buf[CP_BTN_DATA_OFFSET + i * 2 + 1]);
+	cmcp_info->cp_button_ave = cp_btn_ave;
+	cmcp_info->cp_btn_cal = cp_btn_cal;
+	/* multiply by 1000 to increase accuracy */
+	cmcp_info->cp_button_delta = ABS((cp_btn_cal - cp_btn_ave)
+				* 1000 / cp_btn_ave);
+	parade_debug(dev, DEBUG_LEVEL_2, " cp_btn_cal=%d\n", cp_btn_cal);
+	parade_debug(dev, DEBUG_LEVEL_2, " cp_btn_ave=%d\n", cp_btn_ave);
+
+skip_button_test:
+exit:
+	return rc;
+}
+
+static void cyttsp5_free_cmcp_buf(struct cmcp_data *cmcp_info)
+{
+	/* kfree() tolerates NULL, so no per-pointer checks are needed */
+	kfree(cmcp_info->gd_sensor_col);
+	kfree(cmcp_info->gd_sensor_row);
+	kfree(cmcp_info->cm_data_panel);
+	kfree(cmcp_info->cp_tx_data_panel);
+	kfree(cmcp_info->cp_rx_data_panel);
+	kfree(cmcp_info->cp_tx_cal_data_panel);
+	kfree(cmcp_info->cp_rx_cal_data_panel);
+	kfree(cmcp_info->cm_btn_data);
+	kfree(cmcp_info->cp_btn_data);
+	kfree(cmcp_info->cm_sensor_column_delta);
+	kfree(cmcp_info->cm_sensor_row_delta);
+}
+
+static int cyttsp5_cmcp_get_test_item(int item_input)
+{
+	int test_item = 0;
+
+	switch (item_input) {
+	case CMCP_FULL:
+		test_item = CMCP_FULL_CASE;
+		break;
+	case CMCP_CM_PANEL:
+		test_item = CM_PANEL;
+		break;
+	case CMCP_CP_PANEL:
+		test_item = CP_PANEL;
+		break;
+	case CMCP_CM_BTN:
+		test_item = CM_BTN;
+		break;
+	case CMCP_CP_BTN:
+		test_item = CP_BTN;
+		break;
+	}
+	return test_item;
+}
+
+static ssize_t cyttsp5_cmcp_test_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	struct cmcp_data *cmcp_info = dad->cmcp_info;
+	struct result *result = dad->result;
+	struct configuration *configuration = dad->configs;
+	bool final_pass = true;
+	static const char * const cmcp_test_case_array[] = {"Full Cm/Cp test",
+		"Cm panel test", "Cp panel test",
+		"Cm button test", "Cp button test"};
+	int index = 0;
+	int test_item = 0;
+	int no_builtin_file = 0;
+	int rc;
+	u8 status;
+	int self_test_id_supported = 0;
+
+	dev = dad->dev;
+	if ((configuration == NULL) || (cmcp_info == NULL))
+		goto exit;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	if (dad->cmcp_test_in_progress) {
+		mutex_unlock(&dad->sysfs_lock);
+		goto cmcp_not_ready;
+	}
+	dad->cmcp_test_in_progress = 1;
+
+	dad->test_executed = 0;
+	test_item = cyttsp5_cmcp_get_test_item(dad->cmcp_test_items);
+
+	if (dad->builtin_cmcp_threshold_status < 0) {
+		dev_err(dev, "%s: No cmcp threshold file.\n", __func__);
+		no_builtin_file = 1;
+		mutex_unlock(&dad->sysfs_lock);
+		goto start_testing;
+	}
+
+	if (dad->cmcp_test_items < 0) {
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: Invalid test item! Should be 0~4!\n", __func__);
+		mutex_unlock(&dad->sysfs_lock);
+		goto invalid_item;
+	}
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: Test item is %s, %d\n",
+		__func__, cmcp_test_case_array[dad->cmcp_test_items],
+		test_item);
+
+	if ((dad->si->num_btns == 0) && ((dad->cmcp_test_items == CMCP_CM_BTN)
+		|| (dad->cmcp_test_items == CMCP_CP_BTN))) {
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: FW doesn't support button!\n", __func__);
+		mutex_unlock(&dad->sysfs_lock);
+		goto invalid_item_btn;
+	}
+
+	mutex_unlock(&dad->sysfs_lock);
+
+	if (cmcp_check_config_fw_match(dev, configuration))
+		goto mismatch;
+
+start_testing:
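+	/*
+	 * Test sequence: stop the watchdog, force single tx, suspend
+	 * scanning, calibrate, resume scanning, collect the Cm/Cp data,
+	 * then restore multi tx, recalibrate, resume scanning and
+	 * restart the watchdog
+	 */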
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: Start Cm/Cp test!\n", __func__);
+	result->cm_test_pass = true;
+	result->cp_test_pass = true;
+	/* stop watchdog */
+	rc = cmd->request_stop_wd(dev);
+	if (rc)
+		dev_err(dev, "stop watchdog failed\n");
+	/* force single tx */
+	rc = cmd->nonhid_cmd->set_param(dev, 0, 0x1F, 1, 1);
+	if (rc)
+		dev_err(dev, "force single tx failed\n");
+	/* suspend scanning */
+	rc = cmd->nonhid_cmd->suspend_scanning(dev, 0);
+	if (rc)
+		dev_err(dev, "suspend_scanning failed\n");
+	/* do calibration */
+	if (!dad->cmcp_force_calibrate) {
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"do calibration in single tx mode\n");
+		rc = _cyttsp5_calibrate_idacs_cmd(dev, 0, &status);
+		if (rc < 0)
+			dev_err(dev,
+				"%s: Err on calibrate idacs for mutual r=%d\n",
+				__func__, rc);
+
+		rc = _cyttsp5_calibrate_idacs_cmd(dev, 1, &status);
+		if (rc < 0)
+			dev_err(dev,
+				"%s: Err on calibrate idacs for buttons r=%d\n",
+				__func__, rc);
+
+		rc = _cyttsp5_calibrate_idacs_cmd(dev, 2, &status);
+		if (rc < 0)
+			dev_err(dev,
+				"%s: Err on calibrate idacs for self r=%d\n",
+				__func__, rc);
+
+	}
+	/*resume_scanning */
+	rc = cmd->nonhid_cmd->resume_scanning(dev, 0);
+	if (rc)
+		dev_err(dev, "resume_scanning failed");
+
+	/*get all cmcp data from FW*/
+	self_test_id_supported =
+		cyttsp5_get_cmcp_info(dad, cmcp_info);
+	if (self_test_id_supported)
+		dev_err(dev, "cyttsp5_get_cmcp_info failed");
+
+	/*restore to multi tx*/
+	rc = cmd->nonhid_cmd->set_param(dev, 0, 0x1F, 0, 1);
+	if (rc)
+		dev_err(dev, "restore multi tx failed");
+
+	/*suspend_scanning */
+	rc = cmd->nonhid_cmd->suspend_scanning(dev, 0);
+	if (rc)
+		dev_err(dev, "suspend_scanning failed");
+	/*do calibration*/
+	if (!dad->cmcp_force_calibrate) {
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"do calibration in multi tx mode");
+		rc = _cyttsp5_calibrate_idacs_cmd(dev, 0, &status);
+		if (rc < 0)
+			dev_err(dev,
+				"%s: Err on calibrate idacs for mutual r=%d\n",
+				__func__, rc);
+
+		rc = _cyttsp5_calibrate_idacs_cmd(dev, 1, &status);
+		if (rc < 0)
+			dev_err(dev,
+				"%s: Err on calibrate idacs for buttons r=%d\n",
+				__func__, rc);
+
+		rc = _cyttsp5_calibrate_idacs_cmd(dev, 2, &status);
+		if (rc < 0)
+			dev_err(dev,
+				"%s: Err on calibrate idacs for self r=%d\n",
+				__func__, rc);
+
+	}
+	/*resume_scanning */
+	rc = cmd->nonhid_cmd->resume_scanning(dev, 0);
+	if (rc)
+		dev_err(dev, "resume_scanning failed");
+
+	/*start  watchdog*/
+	rc = cmd->request_start_wd(dev);
+	if (rc)
+		dev_err(dev, "start watchdog failed");
+
+	if (self_test_id_supported)
+		goto self_test_id_failed;
+
+	if (no_builtin_file)
+		goto no_builtin;
+
+	if (test_item & CM_ENABLED)
+		validate_cm_test_results(dev, configuration, cmcp_info,
+			result, &final_pass, test_item);
+
+	if (test_item & CP_ENABLED)
+		validate_cp_test_results(dev, configuration, cmcp_info,
+			result, &final_pass, test_item);
+no_builtin:
+	/*full test and full check*/
+	if ((dad->cmcp_test_items == CMCP_FULL) && (dad->cmcp_range_check == 0))
+		result->test_summary = result->cm_test_pass
+			&& result->cp_test_pass && result->short_test_pass;
+
+	else if ((dad->cmcp_test_items == CMCP_FULL)
+		&& (dad->cmcp_range_check == 1))
+		result->test_summary = result->cm_sensor_gd_col_pass
+			&& result->cm_sensor_gd_row_pass
+			&& result->cm_sensor_validation_pass
+			&& result->cp_rx_validation_pass
+			&& result->cp_tx_validation_pass
+			&& result->short_test_pass;
+
+	else if (dad->cmcp_test_items == CMCP_CM_PANEL)
+		result->test_summary = result->cm_sensor_gd_col_pass
+			&& result->cm_sensor_gd_row_pass
+			&& result->cm_sensor_validation_pass
+			&& result->cm_sensor_row_delta_pass
+			&& result->cm_sensor_col_delta_pass
+			&& result->cm_sensor_calibration_pass
+			&& result->cm_sensor_delta_pass;
+
+	else if (dad->cmcp_test_items == CMCP_CP_PANEL)
+		result->test_summary = result->cp_sensor_delta_pass
+			&& result->cp_rx_validation_pass
+			&& result->cp_tx_validation_pass;
+
+	else if (dad->cmcp_test_items == CMCP_CM_BTN)
+		result->test_summary = result->cm_button_validation_pass
+			&& result->cm_button_delta_pass;
+
+	else if (dad->cmcp_test_items == CMCP_CP_BTN)
+		result->test_summary = result->cp_button_delta_pass
+			&& result->cp_button_average_pass
+			&& result->cp_button_validation_pass;
+
+	mutex_lock(&dad->sysfs_lock);
+	dad->test_executed = 1;
+	mutex_unlock(&dad->sysfs_lock);
+
+	if (result->test_summary) {
+		dev_vdbg(dev, "%s: Finish Cm/Cp test! All Test Passed\n",
+			__func__);
+		index = snprintf(buf, CY_MAX_PRBUF_SIZE, "Status 1\n");
+	} else {
+		dev_vdbg(dev, "%s: Finish Cm/Cp test! Range Check Failure\n",
+			__func__);
+		index = snprintf(buf, CY_MAX_PRBUF_SIZE, "Status 6\n");
+	}
+
+	goto cmcp_ready;
+mismatch:
+	index = snprintf(buf, CY_MAX_PRBUF_SIZE,
+		"Status 2\nInput cmcp threshold file mismatches with FW\n");
+	goto cmcp_ready;
+invalid_item_btn:
+	index = snprintf(buf, CY_MAX_PRBUF_SIZE,
+		"Status 3\nFW doesn't support button!\n");
+	goto cmcp_ready;
+invalid_item:
+	index = snprintf(buf, CY_MAX_PRBUF_SIZE,
+		"Status 4\nWrong test item or range check input!\n");
+	index += snprintf(buf + index, CY_MAX_PRBUF_SIZE - index,
+		"Only support below items:\n");
+	index += snprintf(buf + index, CY_MAX_PRBUF_SIZE - index,
+		"0 - Cm/Cp Panel & Button with Gradient (Typical)\n");
+	index += snprintf(buf + index, CY_MAX_PRBUF_SIZE - index,
+		"1 - Cm Panel with Gradient\n2 - Cp Panel\n");
+	index += snprintf(buf + index, CY_MAX_PRBUF_SIZE - index,
+		"3 - Cm Button\n4 - Cp Button\n");
+	index += snprintf(buf + index, CY_MAX_PRBUF_SIZE - index,
+		"Only support below range check:\n");
+	index += snprintf(buf + index, CY_MAX_PRBUF_SIZE - index,
+		"0 - Full Range Checking (default)\n");
+	index += snprintf(buf + index, CY_MAX_PRBUF_SIZE - index,
+		"1 - Basic Range Checking(TSG5 style)\n");
+	goto cmcp_ready;
+self_test_id_failed:
+	index = snprintf(buf, CY_MAX_PRBUF_SIZE,
+		"Status 5\nget self test ID not supported!");
+	goto cmcp_ready;
+cmcp_not_ready:
+	index = snprintf(buf, CY_MAX_PRBUF_SIZE, "Status 0\n");
+	goto cmcp_ready;
+cmcp_ready:
+	mutex_lock(&dad->sysfs_lock);
+	dad->cmcp_test_in_progress = 0;
+	mutex_unlock(&dad->sysfs_lock);
+exit:
+	return index;
+}
+
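+/*
+ * sysfs store handler selecting the Cm/Cp test to run: accepts up to
+ * three values parsed by cyttsp5_ic_parse_input -- test item (0-4),
+ * range check (0-1) and force calibrate (0-1). For example, an input
+ * of "0 1 0" (exact token format per cyttsp5_ic_parse_input) requests
+ * the full test with basic range checking.
+ */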
+static ssize_t cyttsp5_cmcp_test_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	u8 test_item = 0;
+	u8 range_check = 0;
+	u8 force_calibrate = 0;
+	int ret = 0;
+	static const char * const cmcp_test_case_array[] = {"Full Cm/Cp test",
+		"Cm panel test", "Cp panel test",
+		"Cm button test", "Cp button test"};
+	static const char * const cmcp_test_range_check_array[] = {
+		"Full (default)", "Basic"};
+	static const char * const cmcp_test_force_cal_array[] = {
+		"Calibrate When Testing (default)", "No Calibration"};
+	ssize_t length = 0;
+
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dad->sysfs_lock);
+
+	length = cyttsp5_ic_parse_input(dev, buf, size, dad->ic_buf,
+			CY_MAX_PRBUF_SIZE);
+	if (length <= 0 || length > 3) {
+		dev_err(dev, "%s: Input format error!\n", __func__);
+		dad->cmcp_test_items = -EINVAL;
+		ret = -EINVAL;
+		goto error;
+	}
+
+	/* Get test item  */
+	test_item = dad->ic_buf[0];
+	/* Get range check */
+	if (length >= 2)
+		range_check = dad->ic_buf[1];
+	/* Get force calibration */
+	if (length == 3)
+		force_calibrate = dad->ic_buf[2];
+
+	/*
+	 * Test item limitation:
+	 *	 0: Perform all Tests
+	 *	 1: CM Panel with Gradient
+	 *	 2: CP Panel
+	 *	 3: CM Button
+	 *	 4: CP Button
+	 * Range check limitation:
+	 *	 0: full check
+	 *	 1: basic check
+	 * Force calibrate limitation:
+	 *	 0: do calibration
+	 *	 1: don't do calibration
+	 */
+	if ((test_item > 4) || (range_check > 1)
+		|| (force_calibrate > 1)) {
+		dev_err(dev,
+			"%s: Item:[0,4],Range:[0,1],Force calibrate:[0,1]\n",
+			__func__);
+		dad->cmcp_test_items = -EINVAL;
+		ret = -EINVAL;
+		goto error;
+	}
+	/*
+	 * If it is not the full test, range_check should be 0,
+	 * because the other tests have no concept of a basic check
+	 */
+	if (test_item > 0 && test_item < 5)
+		range_check = 0;
+
+	dad->cmcp_test_items = test_item;
+	dad->cmcp_range_check = range_check;
+	dad->cmcp_force_calibrate = force_calibrate;
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Item: %s, Range check: %s, Force calibrate: %s.\n",
+		__func__,
+		cmcp_test_case_array[test_item],
+		cmcp_test_range_check_array[range_check],
+		cmcp_test_force_cal_array[force_calibrate]);
+
+error:
+	mutex_unlock(&dad->sysfs_lock);
+	pm_runtime_put(dev);
+
+	if (ret)
+		return ret;
+
+	return size;
+}
+
+static DEVICE_ATTR(cmcp_test, 0600,
+	cyttsp5_cmcp_test_show, cyttsp5_cmcp_test_store);
+
+static ssize_t csv_loading_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+
+	pr_info("%s: start csv loading\n", __func__);
+
+	schedule_work(&dad->cmcp_threshold_update);
+
+	pr_info("%s: csv loading work scheduled\n", __func__);
+
+	return 0;
+}
+
+static DEVICE_ATTR(csv_loading, 0400, csv_loading_show, NULL);
+
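+/* Append the string in_buf to out_buf at index; returns the new index */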
+static int prepare_print_string(char *out_buf, char *in_buf, int index)
+{
+	if ((out_buf == NULL) || (in_buf == NULL))
+		return index;
+
+	index += scnprintf(&out_buf[index], MAX_BUF_LEN - index, "%s", in_buf);
+	return index;
+}
+
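+/*
+ * Append data_num comma-terminated decimal values from in_buf to
+ * out_buf at index; returns the new index.
+ */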
+static int prepare_print_data(char *out_buf, int32_t *in_buf,
+		int index, int data_num)
+{
+	int i;
+
+	if ((out_buf == NULL) || (in_buf == NULL))
+		return index;
+
+	for (i = 0; i < data_num; i++)
+		index += scnprintf(&out_buf[index], MAX_BUF_LEN - index,
+				"%d,", in_buf[i]);
+	return index;
+}
+
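+/* Write the CSV report header (date and driver version) to out_buf */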
+static int save_header(char *out_buf, int index, struct result *result)
+{
+	struct timex txc;
+	struct rtc_time tm;
+	char time_buf[100] = {0};
+
+	do_gettimeofday(&(txc.time));
+	rtc_time_to_tm(txc.time.tv_sec, &tm);
+	scnprintf(time_buf, 100, "%d/%d/%d,TIME,%d:%d:%d,", tm.tm_year + 1900,
+		 tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+	index = prepare_print_string(out_buf, ",.header,\n", index);
+	index = prepare_print_string(out_buf, ",DATE,", index);
+	index = prepare_print_string(out_buf, &time_buf[0], index);
+	index = prepare_print_string(out_buf, ",\n", index);
+	index = prepare_print_string(out_buf, ",SW_VERSION,", index);
+	index = prepare_print_string(out_buf, CY_DRIVER_VERSION, index);
+	index = prepare_print_string(out_buf, ",\n", index);
+	index = prepare_print_string(out_buf, ",.end,\n", index);
+	index = prepare_print_string(out_buf, ",.engineering data,\n", index);
+
+	return index;
+}
+
+static int print_silicon_id(char *out_buf, char *in_buf, int index)
+{
+	index = prepare_print_string(out_buf, ",1,", index);
+	index = prepare_print_string(out_buf, &in_buf[0], index);
+	return index;
+}
+
+static int save_engineering_data_0_0(struct cyttsp5_device_access_data *dad,
+		char *out_buf, int index, struct cmcp_data *cmcp_info,
+		struct configuration *configuration,
+		struct result *result, int test_item, int no_builtin_file)
+{
+	int i, j;
+	int tx_num = cmcp_info->tx_num;
+	int rx_num = cmcp_info->rx_num;
+	int tmp = 0;
+	char device_id[20] = {0};
+	char tmp_buf[10] = {0};
+	int len;
+
+	/* print CM_DATA_ROW */
+	for (i = 0; i < rx_num; i++) {
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+				",Sensor Cm Validation,CM_DATA_ROW",
+				index);
+		index = prepare_print_data(out_buf, &i, index, 1);
+		for (j = 0; j < tx_num; j++) {
+			index = prepare_print_data(out_buf,
+				&cmcp_info->cm_data_panel[j * rx_num + i],
+				index, 1);
+		}
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	if (no_builtin_file)
+		return 0;
+
+	/* print CM_MAX_GRADIENT_COLS_PERCENT */
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	index = prepare_print_string(out_buf,
+		",Sensor Cm Validation,CM_MAX_GRADIENT_COLS_PERCENT,",
+		index);
+	for (i = 0; i < tx_num; i++) {
+		memset(tmp_buf, 0, ARRAY_SIZE(tmp_buf));
+		scnprintf(tmp_buf, 10, "%d.%d,",
+			cmcp_info->gd_sensor_col[i].gradient_val / 10,
+			cmcp_info->gd_sensor_col[i].gradient_val % 10);
+		index = prepare_print_string(out_buf, &tmp_buf[0], index);
+	}
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/* print CM_MAX_GRADIENT_ROWS_PERCENT */
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	index = prepare_print_string(out_buf,
+		",Sensor Cm Validation,CM_MAX_GRADIENT_ROWS_PERCENT,",
+		index);
+	for (i = 0; i < rx_num; i++) {
+		memset(tmp_buf, 0, ARRAY_SIZE(tmp_buf));
+		scnprintf(tmp_buf, 10, "%d.%d,",
+			cmcp_info->gd_sensor_row[i].gradient_val / 10,
+			cmcp_info->gd_sensor_row[i].gradient_val % 10);
+		index = prepare_print_string(out_buf,
+			&tmp_buf[0], index);
+	}
+	index = prepare_print_string(out_buf, "\n", index);
+
+	if (dad->cmcp_range_check)
+		return 0;
+
+	/*print CM_DELTA_COLUMN*/
+	for (i = 0; i < rx_num; i++) {
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation,DELTA_COLUMNS_ROW",
+			index);
+		index = prepare_print_data(out_buf, &i, index, 1);
+		index = prepare_print_data(out_buf, &tmp, index, 1);
+		for (j = 1; j < tx_num; j++) {
+			len = (j - 1) * rx_num + i;
+			index = prepare_print_data(out_buf,
+				&cmcp_info->cm_sensor_column_delta[len],
+				index, 1);
+		}
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	/*print CM_DELTA_ROW*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	index = prepare_print_string(out_buf,
+			",Sensor Cm Validation,DELTA_ROWS_ROW",
+			index);
+	index = prepare_print_data(out_buf, &tmp, index, 1);
+	for (j = 0; j < tx_num; j++)
+		index = prepare_print_data(out_buf,
+			&tmp, index, 1);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	for (i = 1; i < rx_num; i++) {
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation,DELTA_ROWS_ROW",
+			index);
+		index = prepare_print_data(out_buf, &i, index, 1);
+		for (j = 0; j < tx_num; j++) {
+			len = j * rx_num + i - 1;
+			index = prepare_print_data(out_buf,
+				&cmcp_info->cm_sensor_row_delta[len],
+				index, 1);
+		}
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	/*print pass/fail Sensor Cm Validation*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	if (result->cm_test_pass)
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation,PASS,\n", index);
+	else
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation,FAIL,\n", index);
+
+	return 0;
+}
+
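+/*
+ * Save the Cm (mutual-cap) engineering data section: button Cm data,
+ * panel Cm rows and delta matrices, calibration and average values,
+ * and the related pass/fail lines with their limits.
+ */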
+static int save_engineering_data_0(struct cyttsp5_device_access_data *dad,
+		char *out_buf, int index, struct cmcp_data *cmcp_info,
+		struct configuration *configuration,
+		struct result *result, int test_item, int no_builtin_file)
+{
+	int btn_num = cmcp_info->btn_num;
+	char device_id[20] = {0};
+	char tmp_buf[10] = {0};
+
+	/*print BUTNS_CM_DATA_ROW00*/
+	if (((test_item & CM_BTN) == CM_BTN) && (btn_num > 0)) {
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation,BUTNS_CM_DATA_ROW00,", index);
+		index = prepare_print_data(out_buf,
+			&cmcp_info->cm_btn_data[0], index, btn_num);
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	if ((test_item & CM_PANEL) == CM_PANEL)
+		save_engineering_data_0_0(dad, out_buf, index, cmcp_info,
+			configuration, result, test_item, no_builtin_file);
+
+	if (no_builtin_file)
+		return 0;
+
+	if (((test_item & CM_BTN) == CM_BTN) && (btn_num > 0)
+		&& (!dad->cmcp_range_check)) {
+
+		/*print Button Element by Element */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		if (result->cm_button_validation_pass)
+			index = prepare_print_string(out_buf,
+				",Sensor Cm Validation - Buttons,PASS\n",
+				index);
+		else
+			index = prepare_print_string(out_buf,
+				",Sensor Cm Validation - Buttons,FAIL\n",
+				index);
+
+		/*
+		 * print Sensor Cm Validation
+		 * - Buttons Range, Buttons Range
+		 */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Buttons Range,Buttons Range,",
+			index);
+		scnprintf(tmp_buf, 10, "%d.%d,",
+			cmcp_info->cm_delta_data_btn / 10,
+			cmcp_info->cm_delta_data_btn % 10);
+		index = prepare_print_string(out_buf, &tmp_buf[0], index);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*
+		 * print Sensor Cm Validation
+		 * - Buttons Range, Cm_button_avg
+		 */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Buttons Range,Cm_button_avg,",
+			index);
+		index = prepare_print_data(out_buf,
+			&cmcp_info->cm_ave_data_btn, index, 1);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*
+		 * print Sensor Cm Validation
+		 * - Buttons Range, Cm_button_cal
+		 */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Buttons Range,Cm_button_cal,",
+			index);
+		index = prepare_print_data(out_buf,
+			&cmcp_info->cm_cal_data_btn, index, 1);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*
+		 * print Sensor Cm Validation
+		 * - Buttons Range, pass/fail
+		 */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		if (result->cm_button_delta_pass)
+			index = prepare_print_string(out_buf,
+				",Sensor Cm Validation - Buttons Range,PASS,",
+				index);
+		else
+			index = prepare_print_string(out_buf,
+				",Sensor Cm Validation - Buttons Range,FAIL,",
+				index);
+		index = prepare_print_data(out_buf,
+			&configuration->cm_max_delta_button_percent, index, 1);
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	if ((test_item & CM_PANEL) != CM_PANEL || dad->cmcp_range_check)
+		return 0;
+
+	memset(tmp_buf, 0, ARRAY_SIZE(tmp_buf));
+	/*print Cm_sensor_cal */
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	index = prepare_print_string(out_buf,
+		",Sensor Cm Validation - Calibration,Cm_sensor_cal,",
+		index);
+	index = prepare_print_data(out_buf,
+		&cmcp_info->cm_cal_data_panel, index, 1);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*print Cm_sensor_cal limit*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	if (result->cm_sensor_calibration_pass)
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Calibration 1,PASS",
+			index);
+	else
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Calibration 1,FAIL",
+			index);
+	index = prepare_print_data(out_buf,
+		&configuration->cm_min_limit_cal, index, 1);
+	index = prepare_print_data(out_buf,
+		&configuration->cm_max_limit_cal, index, 1);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*print Columns Delta Matrix*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	if (result->cm_sensor_col_delta_pass)
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Columns Delta Matrix,PASS,",
+			index);
+	else
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Columns Delta Matrix,FAIL,",
+			index);
+	index = prepare_print_data(out_buf,
+		&configuration->cm_range_limit_col, index, 1);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*print Cm Validation - Element by Element*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	if (result->cm_sensor_validation_pass)
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Element by Element,PASS,",
+			index);
+	else
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Element by Element,FAIL,",
+			index);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*print Cm Validation -Gradient Cols*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	if (result->cm_sensor_gd_col_pass)
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Gradient Cols,PASS,",
+			index);
+	else
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Gradient Cols,FAIL,",
+			index);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*print Cm Validation -Gradient Rows*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	if (result->cm_sensor_gd_row_pass)
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Gradient Rows,PASS,",
+			index);
+	else
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Gradient Rows,FAIL,",
+			index);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*
+	 * print Sensor Cm Validation
+	 * - Row Delta Matrix
+	 */
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	if (result->cm_sensor_row_delta_pass)
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Row Delta Matrix,PASS,LIMITS,",
+			index);
+	else
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Row Delta Matrix,FAIL,LIMITS,",
+			index);
+	index = prepare_print_data(out_buf,
+		&configuration->cm_range_limit_row, index, 1);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*print Cm_sensor_avg */
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	index = prepare_print_string(out_buf,
+		",Sensor Cm Validation - Sensor Range,Cm_sensor_avg,", index);
+	index = prepare_print_data(out_buf,
+		&cmcp_info->cm_ave_data_panel, index, 1);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*
+	 * print Sensor Cm Validation
+	 * - Sensor Range, Sensor Range
+	 */
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	index = prepare_print_string(out_buf,
+		",Sensor Cm Validation - Sensor Range,Sensor Range,", index);
+	scnprintf(tmp_buf, 10, "%d.%d,",
+		cmcp_info->cm_sensor_delta / 10,
+		cmcp_info->cm_sensor_delta % 10);
+	index = prepare_print_string(out_buf, &tmp_buf[0], index);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*print Sensor Cm Validation - Sensor Range*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	if (result->cm_sensor_delta_pass)
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Sensor Range,PASS,LIMITS,",
+			index);
+	else
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation - Sensor Range,FAIL,LIMITS,",
+			index);
+	index = prepare_print_data(out_buf,
+			&configuration->cm_max_delta_sensor_percent,
+			index, 1);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	return 0;
+}
+
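+/*
+ * Save the Cp (self-cap) engineering data section: button Cp data,
+ * rx/tx panel data, averages, calibration values, deltas and the
+ * self-cap pass/fail line with its limits.
+ */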
+static int save_engineering_data_1(struct cyttsp5_device_access_data *dad,
+		char *out_buf, int index, struct cmcp_data *cmcp_info,
+		struct configuration *configuration,
+		struct result *result, int test_item, int no_builtin_file)
+{
+	int tx_num = cmcp_info->tx_num;
+	int rx_num = cmcp_info->rx_num;
+	int btn_num = cmcp_info->btn_num;
+	char device_id[20] = {0};
+	char tmp_buf[10] = {0};
+
+	if (((test_item & CP_BTN) == CP_BTN) && (btn_num > 0)) {
+		/*print BUTNS_CP_DATA_ROW00  */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,BUTNS_CP_DATA_ROW00,",
+			index);
+		index = prepare_print_data(out_buf,
+			&cmcp_info->cp_btn_data[0], index, btn_num);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		if (!no_builtin_file && !dad->cmcp_range_check) {
+			/*print Cp Button Element by Element */
+			index = print_silicon_id(out_buf, &device_id[0], index);
+			if (result->cp_button_validation_pass)
+				index = prepare_print_string(out_buf,
+					CY_ERROR0 "PASS\n", index);
+			else
+				index = prepare_print_string(out_buf,
+					CY_ERROR0 "FAIL\n", index);
+
+			/*print cp_button_ave  */
+			index = print_silicon_id(out_buf, &device_id[0], index);
+			index = prepare_print_string(out_buf,
+				",Self-cap Calibration Check,Cp_button_avg,",
+				index);
+			index = prepare_print_data(out_buf,
+				&cmcp_info->cp_button_ave, index, 1);
+			index = prepare_print_string(out_buf, "\n", index);
+
+			/*print Cp_button_cal  */
+			index = print_silicon_id(out_buf, &device_id[0], index);
+			index = prepare_print_string(out_buf,
+				",Self-cap Calibration Check,Cp_button_cal,",
+				index);
+			index = prepare_print_data(out_buf,
+				&cmcp_info->cp_btn_cal, index, 1);
+			index = prepare_print_string(out_buf, "\n", index);
+		}
+	}
+
+	if ((test_item & CP_PANEL) == CP_PANEL) {
+		/*print CP_DATA_RX */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,CP_DATA_RX,", index);
+		index = prepare_print_data(out_buf,
+			&cmcp_info->cp_rx_data_panel[0], index, rx_num);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*print CP_DATA_TX */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,CP_DATA_TX,", index);
+		index = prepare_print_data(out_buf,
+			&cmcp_info->cp_tx_data_panel[0], index, tx_num);
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	if (((test_item & CP_BTN) == CP_BTN) && (btn_num > 0)
+		&& !dad->cmcp_range_check) {
+		if (!no_builtin_file) {
+			/*print  Cp_delta_button */
+			index = print_silicon_id(out_buf, &device_id[0],
+				index);
+			index = prepare_print_string(out_buf,
+				",Self-cap Calibration Check,Cp_delta_button,",
+				index);
+			scnprintf(tmp_buf, 10, "%d.%d,",
+				cmcp_info->cp_button_delta / 10,
+				cmcp_info->cp_button_delta % 10);
+			index = prepare_print_string(out_buf,
+				&tmp_buf[0], index);
+			index = prepare_print_string(out_buf, "\n", index);
+		}
+	}
+
+	if ((test_item & CP_PANEL) == CP_PANEL && !dad->cmcp_range_check
+		&& !no_builtin_file) {
+		/*print Cp_delta_rx */
+		memset(tmp_buf, 0, ARRAY_SIZE(tmp_buf));
+		index = print_silicon_id(out_buf, &device_id[0],
+			index);
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,Cp_delta_rx,",
+			index);
+		scnprintf(tmp_buf, 10, "%d.%d,",
+			cmcp_info->cp_sensor_rx_delta / 10,
+			cmcp_info->cp_sensor_rx_delta % 10);
+		index = prepare_print_string(out_buf,
+				&tmp_buf[0], index);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*print Cp_delta_tx */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,Cp_delta_tx,",
+			index);
+		scnprintf(tmp_buf, 10, "%d.%d,",
+			cmcp_info->cp_sensor_tx_delta / 10,
+			cmcp_info->cp_sensor_tx_delta % 10);
+		index = prepare_print_string(out_buf, &tmp_buf[0], index);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*print Cp_sensor_avg_rx */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,Cp_sensor_avg_rx,",
+			index);
+		index = prepare_print_data(out_buf,
+			&cmcp_info->cp_rx_ave_data_panel, index, 1);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*print Cp_sensor_avg_tx */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,Cp_sensor_avg_tx,",
+			index);
+		index = prepare_print_data(out_buf,
+			&cmcp_info->cp_tx_ave_data_panel, index, 1);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*print Cp_sensor_cal_rx */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,Cp_sensor_cal_rx,", index);
+		index = prepare_print_data(out_buf,
+			&cmcp_info->cp_rx_cal_data_panel[0], index, rx_num);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*print Cp_sensor_cal_tx */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,Cp_sensor_cal_tx,", index);
+		index = prepare_print_data(out_buf,
+			&cmcp_info->cp_tx_cal_data_panel[0], index, tx_num);
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	if (!no_builtin_file && !dad->cmcp_range_check) {
+		/*print  cp test limits  */
+		index = print_silicon_id(out_buf, &device_id[0], index);
+		if (result->cp_test_pass)
+			index = prepare_print_string(out_buf,
+				",Self-cap Calibration Check,PASS, LIMITS,",
+				index);
+		else
+			index = prepare_print_string(out_buf,
+				",Self-cap Calibration Check,FAIL, LIMITS,",
+				index);
+
+		index = prepare_print_string(out_buf,
+			"CP_MAX_DELTA_SENSOR_RX_PERCENT,", index);
+		index = prepare_print_data(out_buf,
+			&configuration->cp_max_delta_sensor_rx_percent,
+			index, 1);
+		index = prepare_print_string(out_buf,
+			"CP_MAX_DELTA_SENSOR_TX_PERCENT,", index);
+		index = prepare_print_data(out_buf,
+			&configuration->cp_max_delta_sensor_tx_percent,
+			index, 1);
+		index = prepare_print_string(out_buf,
+			"CP_MAX_DELTA_BUTTON_PERCENT,", index);
+		index = prepare_print_data(out_buf,
+			&configuration->cp_max_delta_button_percent, index, 1);
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	return 0;
+}
+
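+/*
+ * Save the Cm limit tables taken from the threshold file: gradient
+ * limits plus per-element min/max limits for the panel and buttons.
+ */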
+static int save_engineering_data_2_0(
+		char *out_buf, int index, struct cmcp_data *cmcp_info,
+		struct configuration *configuration,
+		struct result *result, int test_item, int no_builtin_file)
+{
+	int i, j;
+	int tx_num = cmcp_info->tx_num;
+	int rx_num = cmcp_info->rx_num;
+	int btn_num = cmcp_info->btn_num;
+	int len;
+	int32_t *p;
+
+	if ((test_item & CM_PANEL) != CM_PANEL)
+		return 0;
+
+	/*print columns gradient limit*/
+	index = prepare_print_string(out_buf,
+		",Sensor Cm Validation,MAX_LIMIT,CM_MAX_GRADIENT_COLS_PERCENT,",
+		index);
+	index = prepare_print_data(out_buf,
+		&configuration->cm_max_table_gradient_cols_percent[0],
+		index,
+		configuration->cm_max_table_gradient_cols_percent_size);
+	index = prepare_print_string(out_buf, "\n", index);
+	/*print rows gradient limit*/
+	index = prepare_print_string(out_buf,
+		",Sensor Cm Validation,MAX_LIMIT,CM_MAX_GRADIENT_ROWS_PERCENT,",
+		index);
+	index = prepare_print_data(out_buf,
+		&configuration->cm_max_table_gradient_rows_percent[0],
+		index,
+		configuration->cm_max_table_gradient_rows_percent_size);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*print cm max limit*/
+	for (i = 0; i < rx_num; i++) {
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation,MAX_LIMITS,CM_DATA_ROW",
+			index);
+		index = prepare_print_data(out_buf, &i, index, 1);
+		for (j = 0; j < tx_num; j++) {
+			len = i * tx_num * 2 + j * 2 + 1;
+			index = prepare_print_data(
+				out_buf,
+				&configuration->cm_min_max_table_sensor[len],
+				index, 1);
+		}
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	if (((test_item & CM_BTN) == CM_BTN) && (btn_num > 0)) {
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation,MAX LIMITS,M_BUTNS,",
+			index);
+		for (j = 0; j < btn_num; j++) {
+			len = 2 * j + 1;
+			index = prepare_print_data(out_buf,
+				&configuration->cm_min_max_table_button[len],
+				index, 1);
+		}
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	index = prepare_print_string(out_buf,
+		",Sensor Cm Validation MAX LIMITS\n", index);
+
+	if ((test_item & CM_PANEL) == CM_PANEL) {
+		/*print cm min limit*/
+
+		p = (int32_t *)&configuration->cm_min_max_table_sensor[0];
+		for (i = 0; i < rx_num; i++) {
+			index = prepare_print_string(out_buf,
+				",Sensor Cm Validation,MIN_LIMITS,CM_DATA_ROW",
+				index);
+			index = prepare_print_data(out_buf, &i, index, 1);
+			for (j = 0; j < tx_num; j++) {
+				len = i * tx_num * 2 + j * 2;
+				index = prepare_print_data(
+					out_buf, p + len, index, 1);
+			}
+			index = prepare_print_string(out_buf, "\n", index);
+		}
+	}
+
+	if (((test_item & CM_BTN) == CM_BTN) && (btn_num > 0)) {
+		index = prepare_print_string(out_buf,
+			",Sensor Cm Validation,MIN LIMITS,M_BUTNS,", index);
+		for (j = 0; j < btn_num; j++)
+			index = prepare_print_data(out_buf,
+				&configuration->cm_min_max_table_button[2 * j],
+				index, 1);
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	index = prepare_print_string(out_buf,
+		",Sensor Cm Validation MIN LIMITS\n", index);
+	return 0;
+}
+
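+/*
+ * Save the limit tables: Cm limits via save_engineering_data_2_0,
+ * then the Cp per-element min/max limits for tx, rx and buttons.
+ */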
+static int save_engineering_data_2(
+		char *out_buf, int index, struct cmcp_data *cmcp_info,
+		struct configuration *configuration,
+		struct result *result, int test_item, int no_builtin_file)
+{
+	int i;
+	int tx_num = cmcp_info->tx_num;
+	int rx_num = cmcp_info->rx_num;
+	int btn_num = cmcp_info->btn_num;
+	int32_t *p;
+	int32_t *px;
+	int32_t *pb;
+
+	if ((test_item & CM_ENABLED) == CM_ENABLED)
+		save_engineering_data_2_0(out_buf, index, cmcp_info,
+			configuration, result, test_item, no_builtin_file);
+
+	if ((test_item & CP_ENABLED) != CP_ENABLED)
+		return 0;
+
+	if ((test_item & CP_PANEL) == CP_PANEL) {
+		/*print cp tx max limit*/
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,MAX_LIMITS,TX,",
+			index);
+
+		p = (int32_t *)&configuration->cp_min_max_table_tx[0];
+		for (i = 0; i < tx_num; i++) {
+			index = prepare_print_data(out_buf,
+				p + i * 2 + 1, index, 1);
+		}
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*print cp rx max limit*/
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,MAX_LIMITS,RX,",
+			index);
+
+		px = (int32_t *)&configuration->cp_min_max_table_rx[0];
+		for (i = 0; i < rx_num; i++) {
+			index = prepare_print_data(out_buf,
+				px + i * 2 + 1, index, 1);
+		}
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	/*print cp btn max limit*/
+	if (((test_item & CP_BTN) == CP_BTN) && (btn_num > 0)) {
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,MAX_LIMITS,S_BUTNS,",
+			index);
+
+		pb = (int32_t *)&configuration->cp_min_max_table_button[0];
+		for (i = 0; i < btn_num; i++)
+			index = prepare_print_data(out_buf,
+				pb + i * 2 + 1, index, 1);
+
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	if ((test_item & CP_PANEL) == CP_PANEL) {
+		/*print cp tx min limit*/
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,MIN_LIMITS,TX,",
+			index);
+		for (i = 0; i < tx_num; i++)
+			index = prepare_print_data(out_buf,
+				&configuration->cp_min_max_table_tx[i * 2],
+				index, 1);
+		index = prepare_print_string(out_buf, "\n", index);
+
+		/*print cp rx min limit*/
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,MIN_LIMITS,RX,", index);
+		for (i = 0; i < rx_num; i++)
+			index = prepare_print_data(out_buf,
+				&configuration->cp_min_max_table_rx[i * 2],
+				index, 1);
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	/*print cp btn min limit*/
+	if (((test_item & CP_BTN) == CP_BTN) && (btn_num > 0)) {
+		index = prepare_print_string(out_buf,
+			",Self-cap Calibration Check,MIN_LIMITS,S_BUTNS,",
+			index);
+		for (i = 0; i < btn_num; i++)
+			index = prepare_print_data(out_buf,
+				&configuration->cp_min_max_table_button[i * 2],
+				index, 1);
+		index = prepare_print_string(out_buf, "\n", index);
+	}
+
+	return 0;
+}
+
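+/*
+ * Save the complete engineering data report: derive the silicon id
+ * from the mfg id, print the summary, FW revision control, config
+ * version and shorts result, then append the Cm, Cp and limit
+ * sections as selected by test_item.
+ */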
+static int save_engineering_data(struct device *dev, char *out_buf, int index,
+	struct cmcp_data *cmcp_info, struct configuration *configuration,
+	struct result *result, int test_item, int no_builtin_file)
+{
+	int i;
+	uint32_t fw_revision_control;
+	uint32_t fw_config_ver;
+	char device_id[20] = {0};
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+
+	fw_revision_control = dad->si->cydata.revctrl;
+	fw_config_ver = dad->si->cydata.fw_ver_conf;
+	/*calculate silicon id*/
+	result->device_id_low = 0;
+	result->device_id_high = 0;
+
+	for (i = 0; i < 4; i++)
+		result->device_id_low = (result->device_id_low << 8)
+			+ dad->si->cydata.mfg_id[i];
+
+	for (i = 4; i < 8; i++)
+		result->device_id_high = (result->device_id_high << 8)
+			+ dad->si->cydata.mfg_id[i];
+
+	scnprintf(device_id, 20, "%x%x",
+		result->device_id_high, result->device_id_low);
+
+	/*print test summary*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	if (result->test_summary)
+		index = prepare_print_string(out_buf, ",PASS,\n", index);
+	else
+		index = prepare_print_string(out_buf, ",FAIL,\n", index);
+
+	/*revision ctrl number*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	index = prepare_print_string(out_buf, ",FW revision Control,", index);
+	index = prepare_print_data(out_buf, &fw_revision_control, index, 1);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*config version*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	index = prepare_print_string(out_buf, ",CONFIG_VER,", index);
+	index = prepare_print_data(out_buf, &fw_config_ver, index, 1);
+	index = prepare_print_string(out_buf, "\n", index);
+
+	/*short test*/
+	index = print_silicon_id(out_buf, &device_id[0], index);
+	if (result->short_test_pass)
+		index = prepare_print_string(out_buf, ",Shorts,PASS,\n", index);
+	else
+		index = prepare_print_string(out_buf, ",Shorts,FAIL,\n", index);
+
+	if ((test_item & CM_ENABLED) == CM_ENABLED)
+		save_engineering_data_0(dad, out_buf, index, cmcp_info,
+			configuration, result, test_item, no_builtin_file);
+
+	if ((test_item & CP_ENABLED) == CP_ENABLED)
+		save_engineering_data_1(dad, out_buf, index, cmcp_info,
+			configuration, result, test_item, no_builtin_file);
+
+	if (!no_builtin_file)
+		save_engineering_data_2(out_buf, index, cmcp_info,
+			configuration, result, test_item, no_builtin_file);
+
+	return index;
+}
+
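+/*
+ * Serialize the test results into a temporary buffer and copy them
+ * to user space with simple_read_from_buffer(); returns the number
+ * of bytes copied, or 0 when inputs are missing or allocation fails.
+ */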
+static int result_save(struct device *dev, char __user *buf,
+	struct configuration *configuration, struct result *result,
+	struct cmcp_data *cmcp_info, loff_t *ppos, size_t count, int test_item,
+	int no_builtin_file)
+{
+	u8 *out_buf = NULL;
+	int index = 0;
+	int byte_left = 0;
+
+	if (configuration == NULL) {
+		dev_err(dev, "config is NULL");
+		return byte_left;
+	}
+	if (result == NULL) {
+		dev_err(dev, "result is NULL");
+		return byte_left;
+	}
+	if (cmcp_info == NULL) {
+		dev_err(dev, "cmcp_info is NULL");
+		return byte_left;
+	}
+
+	out_buf = kzalloc(MAX_BUF_LEN, GFP_KERNEL);
+	if (!out_buf)
+		return byte_left;
+
+	index = save_header(out_buf, index, result);
+	index = save_engineering_data(dev, out_buf, index,
+			cmcp_info, configuration, result,
+			test_item, no_builtin_file);
+	byte_left = simple_read_from_buffer((void __user *)buf, count,
+			ppos, out_buf, index);
+
+	kfree(out_buf);
+	return byte_left;
+}
+
+static int cmcp_results_debugfs_open(struct inode *inode,
+		struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static int cmcp_results_debugfs_close(struct inode *inode,
+		struct file *filp)
+{
+	filp->private_data = NULL;
+	return 0;
+}
+
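+/*
+ * debugfs read handler: if a test has been executed, stream the saved
+ * CSV report to user space, otherwise return a warning string.
+ */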
+static ssize_t cmcp_results_debugfs_read(struct file *filp,
+		char __user *buf, size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_data *dad = filp->private_data;
+	struct device *dev;
+	struct cmcp_data *cmcp_info = dad->cmcp_info;
+	struct result *result = dad->result;
+	struct configuration *configuration = dad->configs;
+	int ret = 0;
+	int test_item;
+	int no_builtin_file = 0;
+	int test_executed = 0;
+	char warning_info[] = "No test result available!\n";
+
+	dev = dad->dev;
+
+	mutex_lock(&dad->sysfs_lock);
+	test_executed = dad->test_executed;
+	test_item = cyttsp5_cmcp_get_test_item(dad->cmcp_test_items);
+
+	if (dad->builtin_cmcp_threshold_status < 0) {
+		dev_err(dev, "%s: No cmcp threshold file.\n", __func__);
+		no_builtin_file = 1;
+	}
+	mutex_unlock(&dad->sysfs_lock);
+
+	if (test_executed) {
+		/* Save result to buf */
+		ret = result_save(dev, buf, configuration, result,
+			cmcp_info, ppos, count, test_item, no_builtin_file);
+	} else {
+		dev_err(dev, "%s: No test result available!\n", __func__);
+
+		return simple_read_from_buffer((void __user *)buf, count, ppos,
+				warning_info, strlen(warning_info));
+	}
+
+	return ret;
+}
+
+static const struct file_operations cmcp_results_debugfs_fops = {
+	.open = cmcp_results_debugfs_open,
+	.release = cmcp_results_debugfs_close,
+	.read = cmcp_results_debugfs_read,
+	.write = NULL,
+};
+
+static ssize_t cyttsp5_cmcp_threshold_loading_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	bool cmcp_threshold_loading;
+
+	mutex_lock(&dad->cmcp_threshold_lock);
+	cmcp_threshold_loading = dad->cmcp_threshold_loading;
+	mutex_unlock(&dad->cmcp_threshold_lock);
+
+	return snprintf(buf, CY_MAX_PRBUF_SIZE, "%d\n", cmcp_threshold_loading);
+}
+
+/* Return the buffer offset of new test case */
+static u32 cmcp_return_offset_of_new_case(const char *bufPtr, u32 first_time)
+{
+	static u32 offset, first_search;
+
+	if (first_time == 0) {
+		first_search = 0;
+		offset = 0;
+	}
+
+	if (first_search != 0) {
+		/* Search one case */
+		for (;;) {
+			/* Search ASCII_LF */
+			while (*bufPtr++ != ASCII_LF)
+				offset++;
+
+			offset++;
+			/*
+			 * Single line: end loop
+			 * Multiple lines: continue loop
+			 */
+			if (*bufPtr != ASCII_COMMA)
+				break;
+		}
+	} else {
+		first_search = 1;
+	}
+
+	return offset;
+}
+
+/* Get test case information from cmcp threshold file */
+static u32 cmcp_get_case_info_from_threshold_file(
+		struct device *dev, const char *buf,
+		struct test_case_search *search_array, u32 file_size)
+{
+	u32 case_num = 0, buffer_offset = 0, name_count = 0, first_search = 0;
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: Search cmcp threshold file\n",
+		__func__);
+
+	/* Get all the test cases */
+	for (case_num = 0; case_num < MAX_CASE_NUM; case_num++) {
+		buffer_offset = cmcp_return_offset_of_new_case(
+					&buf[buffer_offset], first_search);
+		first_search = 1;
+
+		if (buf[buffer_offset] == 0)
+			break;
+
+		for (name_count = 0; name_count < NAME_SIZE_MAX; name_count++) {
+			/* File end */
+			if (buf[buffer_offset + name_count] == ASCII_COMMA)
+				break;
+
+			search_array[case_num].name[name_count] =
+				buf[buffer_offset + name_count];
+		}
+
+		/* Exit when buffer offset is larger than file size */
+		if (buffer_offset >= file_size)
+			break;
+
+		search_array[case_num].name_size = name_count;
+		search_array[case_num].offset = buffer_offset;
+		/*
+		 *  parade_debug(dev, DEBUG_LEVEL_2, "Find case %d: Name is %s;
+		 *  Name size is %d; Case offset is %d\n",
+		 *	case_num,
+		 *	search_array[case_num].name,
+		 *	search_array[case_num].name_size,
+		 *	search_array[case_num].offset);
+		 */
+	}
+
+	return case_num;
+}
+
+/* Compose one decimal value from an array of digits */
+static int cmcp_compose_data(char *buf, u32 count)
+{
+	u32 base_array[] = {1, 10, 100, 1000, 10000, 100000, 1000000,
+		10000000, 100000000, 1000000000};
+	int value = 0;
+	u32 index = 0;
+
+	for (index = 0; index < count; index++)
+		value += buf[index] * base_array[count - 1 - index];
+
+	return value;
+}
+
+/* Parse and return the next comma-separated decimal value; -1 if none */
+static int cmcp_return_one_value(struct device *dev,
+		const char *buf, u32 *offset, u32 *line_num)
+{
+	int value = -1;
+	char tmp_buffer[10];
+	u32 count = 0;
+	u32 tmp_offset = *offset;
+	static u32 line_count = 1;
+
+	/* Bypass extra commas */
+	while (buf[tmp_offset] == ASCII_COMMA
+			&& buf[tmp_offset + 1] == ASCII_COMMA)
+		tmp_offset++;
+
+	/* Windows and Linux difference at the end of one line */
+	if (buf[tmp_offset] == ASCII_COMMA
+			&& buf[tmp_offset + 1] == ASCII_CR
+			&& buf[tmp_offset + 2] == ASCII_LF)
+		tmp_offset += 2;
+	else if (buf[tmp_offset] == ASCII_COMMA
+			&& buf[tmp_offset + 1] == ASCII_LF)
+		tmp_offset += 1;
+	else if (buf[tmp_offset] == ASCII_COMMA
+			&& buf[tmp_offset + 1] == ASCII_CR)
+		tmp_offset += 1;
+
+	/* New line for multiple lines */
+	if (buf[tmp_offset] == ASCII_LF && buf[tmp_offset + 1] == ASCII_COMMA) {
+		tmp_offset++;
+		line_count++;
+		/*parade_debug(dev, DEBUG_LEVEL_2, "\n");*/
+	}
+
+	/* Beginning */
+	if (buf[tmp_offset] == ASCII_COMMA) {
+		tmp_offset++;
+		for (;;) {
+			if ((buf[tmp_offset] >= ASCII_ZERO)
+				&& (buf[tmp_offset] <= ASCII_NINE)) {
+
+				tmp_buffer[count++] =
+					buf[tmp_offset] - ASCII_ZERO;
+				tmp_offset++;
+			} else {
+				if (count != 0)
+					value = cmcp_compose_data(tmp_buffer,
+						count);
+				else /* -1 indicates no data available */
+					value = -1;
+				break;
+			}
+		}
+	} else {
+		/* Multiple line: line count */
+		if (line_num)
+			*line_num = line_count;
+		/* Reset for next case */
+		line_count = 1;
+	}
+
+	*offset = tmp_offset;
+
+	return value;
+}
+
+/* Get configuration information */
+static void cmcp_get_configuration_info(struct device *dev,
+		const char *buf, struct test_case_search *search_array,
+		u32 case_count, struct test_case_field *field_array,
+		struct configuration *config)
+{
+	u32 count = 0, sub_count = 0;
+	u32 exist_or_not = 0;
+	u32 value_offset = 0;
+	int retval = 0;
+	u32 data_num = 0;
+	u32 line_num = 1;
+
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Fill configuration struct per cmcp threshold file\n",
+		__func__);
+
+	/* Search cases */
+	for (count = 0; count < MAX_CASE_NUM; count++) {
+		exist_or_not = 0;
+		for (sub_count = 0; sub_count < case_count; sub_count++) {
+			if (!strncmp(field_array[count].name,
+					search_array[sub_count].name,
+					field_array[count].name_size)) {
+				exist_or_not = 1;
+				break;
+			}
+		}
+
+		field_array[count].exist_or_not = exist_or_not;
+
+		/* Clear data number */
+		data_num = 0;
+
+		if (exist_or_not != 1) {
+			parade_debug(dev, DEBUG_LEVEL_2,
+				"%s: !!! %s doesn't exist\n",
+				__func__, field_array[count].name);
+			continue;
+		}
+
+		switch (field_array[count].type) {
+		case TEST_CASE_TYPE_NO:
+			field_array[count].data_num = 0;
+			field_array[count].line_num = 1;
+			break;
+
+		case TEST_CASE_TYPE_ONE:
+			value_offset = search_array[sub_count].offset
+				+ search_array[sub_count].name_size;
+			*field_array[count].bufptr =
+				cmcp_return_one_value(dev, buf,
+					&value_offset, NULL);
+			field_array[count].data_num = 1;
+			field_array[count].line_num = 1;
+			break;
+
+		case TEST_CASE_TYPE_MUL:
+		case TEST_CASE_TYPE_MUL_LINES:
+			line_num = 1;
+			value_offset = search_array[sub_count].offset
+				+ search_array[sub_count].name_size;
+			for (;;) {
+				retval = cmcp_return_one_value(dev,
+					buf, &value_offset, &line_num);
+				if (retval < 0)
+					break;
+
+				*field_array[count].bufptr++ = retval;
+				data_num++;
+			}
+
+			field_array[count].data_num = data_num;
+			field_array[count].line_num = line_num;
+			break;
+
+		default:
+			break;
+		}
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: %s: Data number is %d, line number is %d\n",
+			__func__,
+			field_array[count].name,
+			field_array[count].data_num,
+			field_array[count].line_num);
+	}
+}
+
+/* Get basic information, like tx, rx, button number */
+static void cmcp_get_basic_info(struct device *dev,
+	struct test_case_field *field_array, struct configuration *config)
+{
+#define CMCP_DEBUG 0
+	u32 tx_num = 0;
+#if CMCP_DEBUG
+	u32 index = 0;
+#endif
+
+	config->is_valid_or_not = 1; /* Set to valid by default */
+	config->cm_enabled = 0;
+	config->cp_enabled = 0;
+
+	if (field_array[CM_TEST_INPUTS].exist_or_not)
+		config->cm_enabled = 1;
+	if (field_array[CP_TEST_INPUTS].exist_or_not)
+		config->cp_enabled = 1;
+
+	if (!config->cm_enabled || !config->cp_enabled) {
+		if (!config->cm_enabled)
+			parade_debug(dev, DEBUG_LEVEL_2,
+				"%s: No CM thresholds or wrong data format\n",
+				__func__);
+
+		if (!config->cp_enabled)
+			parade_debug(dev, DEBUG_LEVEL_2,
+				"%s: No CP thresholds or wrong data format\n",
+				__func__);
+
+		config->rx_num = 0;
+		config->tx_num = 0;
+		config->btn_num = 0;
+		config->is_valid_or_not = 0;
+		goto exit;
+	}
+
+	/* Get basic information only when CM and CP are enabled */
+
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Find CM and CP thresholds\n", __func__);
+
+	config->rx_num =
+		field_array[PER_ELEMENT_MIN_MAX_TABLE_SENSOR].line_num;
+	tx_num = (field_array[PER_ELEMENT_MIN_MAX_TABLE_SENSOR].data_num >> 1)
+		/ field_array[PER_ELEMENT_MIN_MAX_TABLE_SENSOR].line_num;
+	config->tx_num = tx_num;
+
+	config->btn_num =
+		field_array[PER_ELEMENT_MIN_MAX_TABLE_BUTTON].data_num >> 1;
+
+	config->cm_min_max_table_button_size =
+		field_array[PER_ELEMENT_MIN_MAX_TABLE_BUTTON].data_num;
+	config->cm_min_max_table_sensor_size =
+		field_array[PER_ELEMENT_MIN_MAX_TABLE_SENSOR].data_num;
+	config->cp_min_max_table_rx_size =
+		field_array[PER_ELEMENT_MIN_MAX_RX].data_num;
+	config->cp_min_max_table_tx_size =
+		field_array[PER_ELEMENT_MIN_MAX_TX].data_num;
+	config->cm_max_table_gradient_cols_percent_size =
+		field_array[CM_GRADIENT_CHECK_COL].data_num;
+	config->cm_max_table_gradient_rows_percent_size =
+		field_array[CM_GRADIENT_CHECK_ROW].data_num;
+	config->cp_min_max_table_button_size =
+		field_array[CP_PER_ELEMENT_MIN_MAX_BUTTON].data_num;
+
+#if CMCP_DEBUG
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n", config->cm_excluding_col_edge);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n", config->cm_excluding_row_edge);
+
+	for (index = 0; index < config->cm_max_table_gradient_cols_percent_size;
+		index++)
+		parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+			config->cm_max_table_gradient_cols_percent[index]);
+
+	for (index = 0; index < config->cm_max_table_gradient_rows_percent_size;
+		index++)
+		parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+			config->cm_max_table_gradient_rows_percent[index]);
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n", config->cm_range_limit_row);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n", config->cm_range_limit_col);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n", config->cm_min_limit_cal);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n", config->cm_max_limit_cal);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+		config->cm_max_delta_sensor_percent);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+		config->cm_max_delta_button_percent);
+
+	for (index = 0; index < config->cm_min_max_table_button_size; index++)
+		parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+			config->cm_min_max_table_button[index]);
+
+	for (index = 0; index < config->cm_min_max_table_sensor_size; index++)
+		parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+			config->cm_min_max_table_sensor[index]);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+		config->cp_max_delta_sensor_rx_percent);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+		config->cp_max_delta_sensor_tx_percent);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+		config->cp_max_delta_button_percent);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n", config->min_button);
+	parade_debug(dev, DEBUG_LEVEL_2, "%d\n", config->max_button);
+
+	for (index = 0; index < config->cp_min_max_table_button_size; index++)
+		parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+			config->cp_min_max_table_button[index]);
+	for (index = 0; index < config->cp_min_max_table_rx_size; index++)
+		parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+			config->cp_min_max_table_rx[index]);
+	for (index = 0; index < config->cp_min_max_table_tx_size; index++)
+		parade_debug(dev, DEBUG_LEVEL_2, "%d\n",
+			config->cp_min_max_table_tx[index]);
+#endif
+	/* Invalid mutual data length */
+	if ((field_array[PER_ELEMENT_MIN_MAX_TABLE_SENSOR].data_num >> 1)
+		% field_array[PER_ELEMENT_MIN_MAX_TABLE_SENSOR].line_num) {
+		config->is_valid_or_not = 0;
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"Invalid mutual data length\n");
+	}
+
+exit:
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Input file is %s!\nCM test: %s\n",
+		__func__,
+		config->is_valid_or_not == 1 ? "VALID" : "!!! INVALID !!!",
+		config->cm_enabled == 1 ? "Found" : "Not found");
+
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"CP test: %s\nrx_num is %d\ntx_num is %d\nbtn_num is %d\n",
+		config->cp_enabled == 1 ? "Found" : "Not found",
+		config->rx_num,
+		config->tx_num,
+		config->btn_num);
+}
+
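+/*
+ * Initialize the test case field table: each entry holds a case name,
+ * its length, the value type (none, single, multiple, multiple lines)
+ * and the configuration buffer the parsed values are written to.
+ */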
+static void cmcp_test_case_field_init(struct test_case_field *test_field_array,
+	struct configuration *configs)
+{
+	struct test_case_field test_case_field_array[MAX_CASE_NUM] = {
+		{"CM TEST INPUTS", 14, TEST_CASE_TYPE_NO,
+				NULL, 0, 0, 0},
+		{"CM_EXCLUDING_COL_EDGE", 21, TEST_CASE_TYPE_ONE,
+				&configs->cm_excluding_col_edge, 0, 0, 0},
+		{"CM_EXCLUDING_ROW_EDGE", 21, TEST_CASE_TYPE_ONE,
+				&configs->cm_excluding_row_edge, 0, 0, 0},
+		{"CM_GRADIENT_CHECK_COL", 21, TEST_CASE_TYPE_MUL,
+				&configs->cm_max_table_gradient_cols_percent[0],
+				0, 0, 0},
+		{"CM_GRADIENT_CHECK_ROW", 21, TEST_CASE_TYPE_MUL,
+				&configs->cm_max_table_gradient_rows_percent[0],
+				0, 0, 0},
+		{"CM_RANGE_LIMIT_ROW", 18, TEST_CASE_TYPE_ONE,
+				&configs->cm_range_limit_row, 0, 0, 0},
+		{"CM_RANGE_LIMIT_COL", 18, TEST_CASE_TYPE_ONE,
+				&configs->cm_range_limit_col, 0, 0, 0},
+		{"CM_MIN_LIMIT_CAL", 16, TEST_CASE_TYPE_ONE,
+				&configs->cm_min_limit_cal, 0, 0, 0},
+		{"CM_MAX_LIMIT_CAL", 16, TEST_CASE_TYPE_ONE,
+				&configs->cm_max_limit_cal, 0, 0, 0},
+		{"CM_MAX_DELTA_SENSOR_PERCENT", 27, TEST_CASE_TYPE_ONE,
+				&configs->cm_max_delta_sensor_percent, 0, 0, 0},
+		{"CM_MAX_DELTA_BUTTON_PERCENT", 27, TEST_CASE_TYPE_ONE,
+				&configs->cm_max_delta_button_percent, 0, 0, 0},
+		{"PER_ELEMENT_MIN_MAX_TABLE_BUTTON", 32, TEST_CASE_TYPE_MUL,
+				&configs->cm_min_max_table_button[0], 0, 0, 0},
+		{"PER_ELEMENT_MIN_MAX_TABLE_SENSOR", 32,
+				TEST_CASE_TYPE_MUL_LINES,
+				&configs->cm_min_max_table_sensor[0], 0, 0, 0},
+		{"CP TEST INPUTS", 14, TEST_CASE_TYPE_NO,
+				NULL, 0, 0, 0},
+		{"CP_PER_ELEMENT_MIN_MAX_BUTTON", 29, TEST_CASE_TYPE_MUL,
+				&configs->cp_min_max_table_button[0], 0, 0, 0},
+		{"CP_MAX_DELTA_SENSOR_RX_PERCENT", 30, TEST_CASE_TYPE_ONE,
+				&configs->cp_max_delta_sensor_rx_percent,
+				0, 0, 0},
+		{"CP_MAX_DELTA_SENSOR_TX_PERCENT", 30, TEST_CASE_TYPE_ONE,
+				&configs->cp_max_delta_sensor_tx_percent,
+				0, 0, 0},
+		{"CP_MAX_DELTA_BUTTON_PERCENT", 27, TEST_CASE_TYPE_ONE,
+				&configs->cp_max_delta_button_percent, 0, 0, 0},
+		{"MIN_BUTTON", 10, TEST_CASE_TYPE_ONE,
+				&configs->min_button, 0, 0, 0},
+		{"MAX_BUTTON", 10, TEST_CASE_TYPE_ONE,
+				&configs->max_button, 0, 0, 0},
+		{"PER_ELEMENT_MIN_MAX_RX", 22, TEST_CASE_TYPE_MUL,
+				&configs->cp_min_max_table_rx[0], 0, 0, 0},
+		{"PER_ELEMENT_MIN_MAX_TX", 22, TEST_CASE_TYPE_MUL,
+				&configs->cp_min_max_table_tx[0], 0, 0, 0},
+	};
+
+	memcpy(test_field_array, test_case_field_array,
+		sizeof(struct test_case_field) * MAX_CASE_NUM);
+}
+
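+/*
+ * Parse a cmcp threshold (.csv) file: locate all test cases, fill the
+ * configuration struct from their values, then derive the basic panel
+ * information (rx/tx/button counts).
+ */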
+static ssize_t cyttsp5_parse_cmcp_threshold_file_common(
+	struct device *dev, const char *buf, u32 file_size)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	ssize_t rc = 0;
+	u32 case_count = 0;
+
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Start parsing cmcp threshold file. File size is %d\n",
+		__func__, file_size);
+
+	cmcp_test_case_field_init(dad->test_field_array, dad->configs);
+
+	/* Get all the cases from .csv file */
+	case_count = cmcp_get_case_info_from_threshold_file(dev,
+		buf, dad->test_search_array, file_size);
+
+	/* Search cases */
+	cmcp_get_configuration_info(dev,
+		buf,
+		dad->test_search_array, case_count, dad->test_field_array,
+		dad->configs);
+
+	/* Get basic information */
+	cmcp_get_basic_info(dev, dad->test_field_array, dad->configs);
+
+	return rc;
+}
+
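+/*
+ * sysfs store handler controlling threshold file loading: write 1 to
+ * start loading, -1 to abort, 0 to finish. On 0, the accumulated data
+ * is parsed and the built-in threshold status is marked valid.
+ */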
+static ssize_t cyttsp5_cmcp_threshold_loading_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	long value;
+	int rc;
+
+	rc = kstrtol(buf, 10, &value);
+	if (rc < 0 || value < -1 || value > 1) {
+		dev_err(dev, "%s: Invalid value\n", __func__);
+		return size;
+	}
+
+	mutex_lock(&dad->cmcp_threshold_lock);
+
+	if (value == 1)
+		dad->cmcp_threshold_loading = true;
+	else if (value == -1)
+		dad->cmcp_threshold_loading = false;
+	else if (value == 0 && dad->cmcp_threshold_loading) {
+		dad->cmcp_threshold_loading = false;
+
+		if (dad->cmcp_threshold_size == 0) {
+			dev_err(dev, "%s: No cmcp threshold data\n", __func__);
+			goto exit_free;
+		}
+
+		/* Clear test executed flag */
+		dad->test_executed = 0;
+
+		cyttsp5_parse_cmcp_threshold_file_common(dev,
+			&dad->cmcp_threshold_data[0], dad->cmcp_threshold_size);
+
+		/* Mark valid */
+		dad->builtin_cmcp_threshold_status = 0;
+		/* Restore test item to default value when new file input */
+		dad->cmcp_test_items = 0;
+	}
+
+exit_free:
+	kfree(dad->cmcp_threshold_data);
+	dad->cmcp_threshold_data = NULL;
+	dad->cmcp_threshold_size = 0;
+
+	mutex_unlock(&dad->cmcp_threshold_lock);
+
+	if (rc)
+		return rc;
+
+	return size;
+}
+
+static DEVICE_ATTR(cmcp_threshold_loading, 0600,
+	cyttsp5_cmcp_threshold_loading_show,
+	cyttsp5_cmcp_threshold_loading_store);
+
+/*
+ * cmcp threshold data write
+ */
+static ssize_t cyttsp5_cmcp_threshold_data_write(struct file *filp,
+		struct kobject *kobj, struct bin_attribute *bin_attr,
+		char *buf, loff_t offset, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	u8 *p;
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: offset:%lld count:%zu\n",
+		__func__, offset, count);
+
+	mutex_lock(&dad->cmcp_threshold_lock);
+
+	if (!dad->cmcp_threshold_loading) {
+		mutex_unlock(&dad->cmcp_threshold_lock);
+		return -ENODEV;
+	}
+
+	p = krealloc(dad->cmcp_threshold_data, offset + count, GFP_KERNEL);
+	if (!p) {
+		kfree(dad->cmcp_threshold_data);
+		dad->cmcp_threshold_data = NULL;
+		mutex_unlock(&dad->cmcp_threshold_lock);
+		return -ENOMEM;
+	}
+	dad->cmcp_threshold_data = p;
+
+	memcpy(&dad->cmcp_threshold_data[offset], buf, count);
+	dad->cmcp_threshold_size += count;
+
+	mutex_unlock(&dad->cmcp_threshold_lock);
+
+	return count;
+}
+
+static struct bin_attribute bin_attr_cmcp_threshold_data = {
+	.attr = {
+		.name = "cmcp_threshold_data",
+		.mode = 0200,
+	},
+	.size = 0,
+	.write = cyttsp5_cmcp_threshold_data_write,
+};
+
+/*
+ * Suspend scan command
+ */
+static int cyttsp5_suspend_scan_cmd_(struct device *dev)
+{
+	int rc;
+
+	rc = cmd->nonhid_cmd->suspend_scanning(dev, 0);
+	if (rc < 0)
+		dev_err(dev, "%s: Suspend scan failed r=%d\n", __func__, rc);
+	return rc;
+}
+
+/*
+ * Resume scan command
+ */
+static int cyttsp5_resume_scan_cmd_(struct device *dev)
+{
+	int rc;
+
+	rc = cmd->nonhid_cmd->resume_scanning(dev, 0);
+	if (rc < 0)
+		dev_err(dev, "%s: Resume scan failed r=%d\n", __func__, rc);
+	return rc;
+}
+
+/*
+ * Execute scan command
+ */
+static int cyttsp5_exec_scan_cmd_(struct device *dev)
+{
+	int rc;
+
+	rc = cmd->nonhid_cmd->exec_panel_scan(dev, 0);
+	if (rc < 0)
+		dev_err(dev, "%s: Heatmap start scan failed r=%d\n",
+			__func__, rc);
+	return rc;
+}
+
+/*
+ * Retrieve panel data command
+ */
+static int cyttsp5_ret_scan_data_cmd_(struct device *dev, u16 read_offset,
+		u16 read_count, u8 data_id, u8 *response, u8 *config,
+		u16 *actual_read_len, u8 *return_buf)
+{
+	int rc;
+
+	rc = cmd->nonhid_cmd->retrieve_panel_scan(dev, 0, read_offset,
+			read_count, data_id, response, config, actual_read_len,
+			return_buf);
+	if (rc < 0)
+		dev_err(dev, "%s: Retrieve scan data failed r=%d\n",
+			__func__, rc);
+	return rc;
+}
+
+/*
+ * Get data structure command
+ */
+static int cyttsp5_get_data_structure_cmd_(struct device *dev, u16 read_offset,
+		u16 read_length, u8 data_id, u8 *status, u8 *data_format,
+		u16 *actual_read_len, u8 *data)
+{
+	int rc;
+
+	rc = cmd->nonhid_cmd->get_data_structure(dev, 0, read_offset,
+			read_length, data_id, status, data_format,
+			actual_read_len, data);
+	if (rc < 0)
+		dev_err(dev, "%s: Get data structure failed r=%d\n",
+			__func__, rc);
+	return rc;
+}
+
+/*
+ * Run self test command
+ */
+static int cyttsp5_run_selftest_cmd_(struct device *dev, u8 test_id,
+		u8 write_idacs_to_flash, u8 *status, u8 *summary_result,
+		u8 *results_available)
+{
+	int rc;
+
+	rc = cmd->nonhid_cmd->run_selftest(dev, 0, test_id,
+			write_idacs_to_flash, status, summary_result,
+			results_available);
+	if (rc < 0)
+		dev_err(dev, "%s: Run self test failed r=%d\n", __func__, rc);
+	return rc;
+}
+
+/*
+ * Get self test result command
+ */
+static int cyttsp5_get_selftest_result_cmd_(struct device *dev,
+		u16 read_offset, u16 read_length, u8 test_id, u8 *status,
+		u16 *actual_read_len, u8 *data)
+{
+	int rc;
+
+	rc = cmd->nonhid_cmd->get_selftest_result(dev, 0, read_offset,
+			read_length, test_id, status, actual_read_len, data);
+	if (rc < 0)
+		dev_err(dev, "%s: Get self test result failed r=%d\n",
+			__func__, rc);
+	return rc;
+}
+
+/*
+ * Calibrate IDACs command
+ */
+static int _cyttsp5_calibrate_idacs_cmd(struct device *dev,
+		u8 sensing_mode, u8 *status)
+{
+	int rc;
+
+	rc = cmd->nonhid_cmd->calibrate_idacs(dev, 0, sensing_mode, status);
+	return rc;
+}
+
+/*
+ * Initialize Baselines command
+ */
+static int _cyttsp5_initialize_baselines_cmd(struct device *dev,
+		u8 sensing_mode, u8 *status)
+{
+	int rc;
+
+	rc = cmd->nonhid_cmd->initialize_baselines(dev, 0, sensing_mode,
+			status);
+	return rc;
+}
+
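+/*
+ * Format a response for user space: a "status <n>" line followed by one
+ * two-digit hex byte per line. Returns the number of characters written.
+ */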
+static int prepare_print_buffer(int status, u8 *in_buf, int length,
+		u8 *out_buf, size_t out_buf_size)
+{
+	int index = 0;
+	int i;
+
+	index += scnprintf(out_buf, out_buf_size, "status %d\n", status);
+
+	for (i = 0; i < length; i++)
+		index += scnprintf(&out_buf[index], out_buf_size - index,
+				"%02X\n", in_buf[i]);
+
+	return index;
+}
+
+static ssize_t cyttsp5_run_and_get_selftest_result_noprint(struct device *dev,
+		char *buf, size_t buf_len, u8 test_id, u16 read_length,
+		bool get_result_on_pass)
+{
+	int rc;
+	struct cyttsp5_device_access_data *dad =
+			cyttsp5_get_device_access_data(dev);
+	int status = STATUS_FAIL;
+	u8 cmd_status = 0;
+	u8 summary_result = 0;
+	u16 act_length = 0;
+	int length = 0;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	pm_runtime_get_sync(dev);
+
+	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on request exclusive r=%d\n",
+			__func__, rc);
+		goto put_pm_runtime;
+	}
+
+	rc = cyttsp5_suspend_scan_cmd_(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on suspend scan r=%d\n", __func__, rc);
+		goto release_exclusive;
+	}
+
+	rc = cyttsp5_run_selftest_cmd_(dev, test_id, 0,
+			&cmd_status, &summary_result, NULL);
+	if (rc < 0) {
+		dev_err(dev,
+			"%s: Error on run self test for test_id:%d r=%d\n",
+			__func__, test_id, rc);
+		goto resume_scan;
+	}
+
+	/* Form response buffer */
+	dad->ic_buf[0] = cmd_status;
+	dad->ic_buf[1] = summary_result;
+
+	length = 2;
+
+	/* Skip reading result data if the command itself failed */
+	if (cmd_status != CY_CMD_STATUS_SUCCESS)
+		goto status_success;
+
+	/* Skip reading result data on PASS unless explicitly requested */
+	if (summary_result == CY_ST_RESULT_PASS && !get_result_on_pass)
+		goto status_success;
+
+	rc = cyttsp5_get_selftest_result_cmd_(dev, 0, read_length,
+			test_id, &cmd_status, &act_length, &dad->ic_buf[6]);
+	if (rc < 0) {
+		dev_err(dev,
+			"%s: Error on get self test result r=%d\n",
+			__func__, rc);
+		goto resume_scan;
+	}
+
+	dad->ic_buf[2] = cmd_status;
+	dad->ic_buf[3] = test_id;
+	dad->ic_buf[4] = LOW_BYTE(act_length);
+	dad->ic_buf[5] = HI_BYTE(act_length);
+
+	length = 6 + act_length;
+
+status_success:
+	status = STATUS_SUCCESS;
+
+resume_scan:
+	cyttsp5_resume_scan_cmd_(dev);
+
+release_exclusive:
+	cmd->release_exclusive(dev);
+
+put_pm_runtime:
+	pm_runtime_put(dev);
+	mutex_unlock(&dad->sysfs_lock);
+
+	return status;
+}
+
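+/*
+ * Response layout built in dad->ic_buf by the self test helpers:
+ *   [0] command status, [1] summary result,
+ *   [2] status of the result read, [3] test id,
+ *   [4..5] actual result length (little endian), [6..] result data.
+ * The data portion is only read back when the command succeeded and the
+ * test did not pass (or results were explicitly requested on pass).
+ */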
+static ssize_t cyttsp5_run_and_get_selftest_result(struct device *dev,
+		char *buf, size_t buf_len, u8 test_id, u16 read_length,
+		bool get_result_on_pass)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	int status = STATUS_FAIL;
+	u8 cmd_status = 0;
+	u8 summary_result = 0;
+	u16 act_length = 0;
+	int length = 0;
+	int size;
+	int rc;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	pm_runtime_get_sync(dev);
+
+	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on request exclusive r=%d\n",
+			__func__, rc);
+		goto put_pm_runtime;
+	}
+
+	rc = cyttsp5_suspend_scan_cmd_(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on suspend scan r=%d\n", __func__, rc);
+		goto release_exclusive;
+	}
+
+	rc = cyttsp5_run_selftest_cmd_(dev, test_id, 0,
+			&cmd_status, &summary_result, NULL);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on run self test for test_id:%d r=%d\n",
+			__func__, test_id, rc);
+		goto resume_scan;
+	}
+
+	/* Form response buffer */
+	dad->ic_buf[0] = cmd_status;
+	dad->ic_buf[1] = summary_result;
+
+	length = 2;
+
+	/* Skip reading result data if the command itself failed */
+	if (cmd_status != CY_CMD_STATUS_SUCCESS)
+		goto status_success;
+
+	/* Skip reading result data on PASS unless explicitly requested */
+	if (summary_result == CY_ST_RESULT_PASS && !get_result_on_pass)
+		goto status_success;
+
+	rc = cyttsp5_get_selftest_result_cmd_(dev, 0, read_length,
+			test_id, &cmd_status, &act_length, &dad->ic_buf[6]);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on get self test result r=%d\n",
+			__func__, rc);
+		goto resume_scan;
+	}
+
+	dad->ic_buf[2] = cmd_status;
+	dad->ic_buf[3] = test_id;
+	dad->ic_buf[4] = LOW_BYTE(act_length);
+	dad->ic_buf[5] = HI_BYTE(act_length);
+
+	length = 6 + act_length;
+
+status_success:
+	status = STATUS_SUCCESS;
+
+resume_scan:
+	cyttsp5_resume_scan_cmd_(dev);
+
+release_exclusive:
+	cmd->release_exclusive(dev);
+
+put_pm_runtime:
+	pm_runtime_put(dev);
+
+	if (status == STATUS_FAIL)
+		length = 0;
+
+	size = prepare_print_buffer(status, dad->ic_buf, length, buf, buf_len);
+
+	mutex_unlock(&dad->sysfs_lock);
+
+	return size;
+}
+
+struct cyttsp5_device_access_debugfs_data {
+	struct cyttsp5_device_access_data *dad;
+	ssize_t pr_buf_len;
+	u8 pr_buf[3 * CY_MAX_PRBUF_SIZE];
+};
+
+static int cyttsp5_device_access_debugfs_open(struct inode *inode,
+		struct file *filp)
+{
+	struct cyttsp5_device_access_data *dad = inode->i_private;
+	struct cyttsp5_device_access_debugfs_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->dad = dad;
+
+	filp->private_data = data;
+
+	return nonseekable_open(inode, filp);
+}
+
+static int cyttsp5_device_access_debugfs_release(struct inode *inode,
+		struct file *filp)
+{
+	kfree(filp->private_data);
+
+	return 0;
+}
+
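+/*
+ * Generate a file_operations for a debugfs node that shares the common
+ * open/release handlers above and plugs in per-node read/write methods.
+ */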
+#define CY_DEBUGFS_FOPS(_name, _read, _write) \
+static const struct file_operations _name##_debugfs_fops = { \
+	.open = cyttsp5_device_access_debugfs_open, \
+	.release = cyttsp5_device_access_debugfs_release, \
+	.read = _read, \
+	.write = _write, \
+}
+
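+/*
+ * panel_scan read sequence: suspend scanning, execute a single scan,
+ * retrieve the scan data in chunks until the firmware reports no more
+ * elements, rebuild the command header with the final length and element
+ * count, then resume scanning. Output goes through prepare_print_buffer
+ * with the 5-byte command header stripped.
+ */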
+static ssize_t panel_scan_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+	struct cyttsp5_device_access_data *dad = data->dad;
+	struct device *dev = dad->dev;
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	int status = STATUS_FAIL;
+	u8 config;
+	u16 actual_read_len;
+	int length = 0;
+	u8 element_size = 0;
+	u8 *buf_offset;
+	int elem_offset = 0;
+	int rc;
+
+	if (*ppos)
+		goto exit;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	pm_runtime_get_sync(dev);
+
+	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on request exclusive r=%d\n",
+			__func__, rc);
+		goto put_pm_runtime;
+	}
+
+	rc = cyttsp5_suspend_scan_cmd_(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on suspend scan r=%d\n", __func__, rc);
+		goto release_exclusive;
+	}
+
+	rc = cyttsp5_exec_scan_cmd_(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on execute panel scan r=%d\n",
+			__func__, rc);
+		goto resume_scan;
+	}
+
+	/* Set length to max to read all */
+	rc = cyttsp5_ret_scan_data_cmd_(dev, 0, 0xFFFF,
+			dad->panel_scan_data_id, dad->ic_buf, &config,
+			&actual_read_len, NULL);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on retrieve panel scan r=%d\n",
+			__func__, rc);
+		goto resume_scan;
+	}
+
+	length = get_unaligned_le16(&dad->ic_buf[0]);
+	buf_offset = dad->ic_buf + length;
+	element_size = config & 0x07;
+	elem_offset = actual_read_len;
+	while (actual_read_len > 0) {
+		rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, 0xFFFF,
+				dad->panel_scan_data_id, NULL, &config,
+				&actual_read_len, buf_offset);
+		if (rc < 0)
+			goto resume_scan;
+
+		length += actual_read_len * element_size;
+		buf_offset = dad->ic_buf + length;
+		elem_offset += actual_read_len;
+	}
+	/* Reconstruct cmd header */
+	put_unaligned_le16(length, &dad->ic_buf[0]);
+	put_unaligned_le16(elem_offset, &dad->ic_buf[7]);
+
+	/* Do not print command header */
+	length -= 5;
+
+	status = STATUS_SUCCESS;
+
+resume_scan:
+	cyttsp5_resume_scan_cmd_(dev);
+
+release_exclusive:
+	cmd->release_exclusive(dev);
+
+put_pm_runtime:
+	pm_runtime_put(dev);
+
+	if (status == STATUS_FAIL)
+		length = 0;
+
+	if (cd->show_timestamp) {
+		int index = 0;
+
+		index += scnprintf(data->pr_buf, sizeof(data->pr_buf),
+			"[%u] ", jiffies_to_msecs(jiffies));
+		data->pr_buf_len = prepare_print_buffer(status,
+			&dad->ic_buf[5], length, &data->pr_buf[index],
+			sizeof(data->pr_buf) - index);
+	} else {
+		data->pr_buf_len = prepare_print_buffer(status,
+			&dad->ic_buf[5], length, data->pr_buf,
+			sizeof(data->pr_buf));
+	}
+
+	mutex_unlock(&dad->sysfs_lock);
+
+exit:
+	return simple_read_from_buffer((void __user *)buf, count, ppos,
+			data->pr_buf, data->pr_buf_len);
+}
+
+static ssize_t panel_scan_debugfs_write(struct file *filp,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+	struct cyttsp5_device_access_data *dad = data->dad;
+	ssize_t length;
+	int rc = 0;
+
+	rc = simple_write_to_buffer(data->pr_buf, sizeof(data->pr_buf), ppos,
+			buf, count);
+	if (rc < 0)
+		return rc;
+
+	count = rc;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	length = cyttsp5_ic_parse_input(dad->dev, data->pr_buf, count,
+			dad->ic_buf, CY_MAX_PRBUF_SIZE);
+
+	if (length != 1) {
+		dev_err(dad->dev, "%s: Malformed input\n", __func__);
+		rc = -EINVAL;
+		goto exit_unlock;
+	}
+
+	dad->panel_scan_data_id = dad->ic_buf[0];
+
+exit_unlock:
+	mutex_unlock(&dad->sysfs_lock);
+
+	if (rc)
+		return rc;
+
+	return count;
+}
+
+CY_DEBUGFS_FOPS(panel_scan, panel_scan_debugfs_read, panel_scan_debugfs_write);
+
+static ssize_t get_idac_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+	struct cyttsp5_device_access_data *dad = data->dad;
+	struct device *dev = dad->dev;
+	int status = STATUS_FAIL;
+	u8 cmd_status = 0;
+	u8 data_format = 0;
+	u16 act_length = 0;
+	int length = 0;
+	int rc;
+
+	if (*ppos)
+		goto exit;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	pm_runtime_get_sync(dev);
+
+	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on request exclusive r=%d\n",
+			__func__, rc);
+		goto put_pm_runtime;
+	}
+
+	rc = cyttsp5_suspend_scan_cmd_(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on suspend scan r=%d\n", __func__, rc);
+		goto release_exclusive;
+	}
+
+	rc = cyttsp5_get_data_structure_cmd_(dev, 0, PIP_CMD_MAX_LENGTH,
+			dad->get_idac_data_id, &cmd_status, &data_format,
+			&act_length, &dad->ic_buf[5]);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on get data structure r=%d\n",
+			__func__, rc);
+		goto resume_scan;
+	}
+
+	dad->ic_buf[0] = cmd_status;
+	dad->ic_buf[1] = dad->get_idac_data_id;
+	dad->ic_buf[2] = LOW_BYTE(act_length);
+	dad->ic_buf[3] = HI_BYTE(act_length);
+	dad->ic_buf[4] = data_format;
+
+	length = 5 + act_length;
+
+	status = STATUS_SUCCESS;
+
+resume_scan:
+	cyttsp5_resume_scan_cmd_(dev);
+
+release_exclusive:
+	cmd->release_exclusive(dev);
+
+put_pm_runtime:
+	pm_runtime_put(dev);
+
+	if (status == STATUS_FAIL)
+		length = 0;
+
+	data->pr_buf_len = prepare_print_buffer(status, dad->ic_buf, length,
+			data->pr_buf, sizeof(data->pr_buf));
+
+	mutex_unlock(&dad->sysfs_lock);
+
+exit:
+	return simple_read_from_buffer((void __user *)buf, count, ppos,
+			data->pr_buf, data->pr_buf_len);
+}
+
+static ssize_t get_idac_debugfs_write(struct file *filp,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+	struct cyttsp5_device_access_data *dad = data->dad;
+	ssize_t length;
+	int rc = 0;
+
+	rc = simple_write_to_buffer(data->pr_buf, sizeof(data->pr_buf), ppos,
+			buf, count);
+	if (rc < 0)
+		return rc;
+
+	count = rc;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	length = cyttsp5_ic_parse_input(dad->dev, data->pr_buf, count,
+			dad->ic_buf, CY_MAX_PRBUF_SIZE);
+	if (length != 1) {
+		dev_err(dad->dev, "%s: Malformed input\n", __func__);
+		rc = -EINVAL;
+		goto exit_unlock;
+	}
+
+	dad->get_idac_data_id = dad->ic_buf[0];
+
+exit_unlock:
+	mutex_unlock(&dad->sysfs_lock);
+
+	if (rc)
+		return rc;
+
+	return count;
+}
+
+CY_DEBUGFS_FOPS(get_idac, get_idac_debugfs_read, get_idac_debugfs_write);
+
+static ssize_t calibrate_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+	struct cyttsp5_device_access_data *dad = data->dad;
+	struct device *dev = dad->dev;
+	int status = STATUS_FAIL;
+	int length = 0;
+	int rc;
+
+	if (*ppos)
+		goto exit;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	pm_runtime_get_sync(dev);
+
+	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on request exclusive r=%d\n",
+			__func__, rc);
+		goto put_pm_runtime;
+	}
+
+	rc = cyttsp5_suspend_scan_cmd_(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on suspend scan r=%d\n", __func__, rc);
+		goto release_exclusive;
+	}
+
+	rc = _cyttsp5_calibrate_idacs_cmd(dev, dad->calibrate_sensing_mode,
+			&dad->ic_buf[0]);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on calibrate idacs r=%d\n",
+			__func__, rc);
+		goto resume_scan;
+	}
+
+	length = 1;
+
+	/* Check if baseline initialization is requested */
+	if (dad->calibrate_initialize_baselines) {
+		/* Perform baseline initialization for all modes */
+		rc = _cyttsp5_initialize_baselines_cmd(dev, CY_IB_SM_MUTCAP |
+				CY_IB_SM_SELFCAP | CY_IB_SM_BUTTON,
+				&dad->ic_buf[length]);
+		if (rc < 0) {
+			dev_err(dev, "%s: Error on initialize baselines r=%d\n",
+				__func__, rc);
+			goto resume_scan;
+		}
+
+		length++;
+	}
+
+	status = STATUS_SUCCESS;
+
+resume_scan:
+	cyttsp5_resume_scan_cmd_(dev);
+
+release_exclusive:
+	cmd->release_exclusive(dev);
+
+put_pm_runtime:
+	pm_runtime_put(dev);
+
+	if (status == STATUS_FAIL)
+		length = 0;
+
+	data->pr_buf_len = prepare_print_buffer(status, dad->ic_buf, length,
+			data->pr_buf, sizeof(data->pr_buf));
+
+	mutex_unlock(&dad->sysfs_lock);
+
+exit:
+	return simple_read_from_buffer((void __user *)buf, count, ppos,
+			data->pr_buf, data->pr_buf_len);
+}
+
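+/*
+ * calibrate write: expects exactly two parsed bytes, the sensing mode to
+ * calibrate and a flag selecting whether the subsequent read should also
+ * initialize baselines for all sensing modes.
+ */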
+static ssize_t calibrate_debugfs_write(struct file *filp,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+	struct cyttsp5_device_access_data *dad = data->dad;
+	ssize_t length;
+	int rc = 0;
+
+	rc = simple_write_to_buffer(data->pr_buf, sizeof(data->pr_buf), ppos,
+			buf, count);
+	if (rc < 0)
+		return rc;
+
+	count = rc;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	length = cyttsp5_ic_parse_input(dad->dev, data->pr_buf, count,
+			dad->ic_buf, CY_MAX_PRBUF_SIZE);
+	if (length != 2) {
+		dev_err(dad->dev, "%s: Malformed input\n", __func__);
+		rc = -EINVAL;
+		goto exit_unlock;
+	}
+
+	dad->calibrate_sensing_mode = dad->ic_buf[0];
+	dad->calibrate_initialize_baselines = dad->ic_buf[1];
+
+exit_unlock:
+	mutex_unlock(&dad->sysfs_lock);
+
+	if (rc)
+		return rc;
+
+	return count;
+}
+
+CY_DEBUGFS_FOPS(calibrate, calibrate_debugfs_read, calibrate_debugfs_write);
+
+static ssize_t baseline_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+	struct cyttsp5_device_access_data *dad = data->dad;
+	struct device *dev = dad->dev;
+	int status = STATUS_FAIL;
+	int length = 0;
+	int rc;
+
+	if (*ppos)
+		goto exit;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	pm_runtime_get_sync(dev);
+
+	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on request exclusive r=%d\n",
+			__func__, rc);
+		goto put_pm_runtime;
+	}
+
+	rc = cyttsp5_suspend_scan_cmd_(dev);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on suspend scan r=%d\n", __func__, rc);
+		goto release_exclusive;
+	}
+
+	rc = _cyttsp5_initialize_baselines_cmd(dev, dad->baseline_sensing_mode,
+			&dad->ic_buf[0]);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on initialize baselines r=%d\n",
+			__func__, rc);
+		goto resume_scan;
+	}
+
+	length = 1;
+
+	status = STATUS_SUCCESS;
+
+resume_scan:
+	cyttsp5_resume_scan_cmd_(dev);
+
+release_exclusive:
+	cmd->release_exclusive(dev);
+
+put_pm_runtime:
+	pm_runtime_put(dev);
+
+	if (status == STATUS_FAIL)
+		length = 0;
+
+	data->pr_buf_len = prepare_print_buffer(status, dad->ic_buf, length,
+			data->pr_buf, sizeof(data->pr_buf));
+
+	mutex_unlock(&dad->sysfs_lock);
+
+exit:
+	return simple_read_from_buffer((void __user *)buf, count, ppos,
+			data->pr_buf, data->pr_buf_len);
+}
+
+static ssize_t baseline_debugfs_write(struct file *filp,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+	struct cyttsp5_device_access_data *dad = data->dad;
+	ssize_t length;
+	int rc = 0;
+	char *kbuf = NULL;
+
+	rc = simple_write_to_buffer(data->pr_buf, sizeof(data->pr_buf), ppos,
+			buf, count);
+	if (rc < 0)
+		return rc;
+
+	count = rc;
+
+	kbuf = kmalloc(count, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	mutex_lock(&dad->sysfs_lock);
+
+	if (copy_from_user(kbuf, buf, count)) {
+		rc = -EFAULT;
+		goto exit_unlock;
+	}
+
+	length = cyttsp5_ic_parse_input(dad->dev, (const char *)kbuf,
+			count, dad->ic_buf, CY_MAX_PRBUF_SIZE);
+	if (length != 1) {
+		dev_err(dad->dev, "%s: Malformed input\n", __func__);
+		rc = -EINVAL;
+		goto exit_unlock;
+	}
+
+	dad->baseline_sensing_mode = dad->ic_buf[0];
+
+exit_unlock:
+	mutex_unlock(&dad->sysfs_lock);
+	kfree(kbuf);
+
+	if (rc)
+		return rc;
+
+	return count;
+}
+
+CY_DEBUGFS_FOPS(baseline, baseline_debugfs_read, baseline_debugfs_write);
+
+static ssize_t auto_shorts_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+
+	if (!*ppos)
+		/* Set length to PIP_CMD_MAX_LENGTH to read all */
+		data->pr_buf_len = cyttsp5_run_and_get_selftest_result(
+			data->dad->dev, data->pr_buf, sizeof(data->pr_buf),
+			CY_ST_ID_AUTOSHORTS, PIP_CMD_MAX_LENGTH, false);
+
+	return simple_read_from_buffer((void __user *)buf, count, ppos,
+			data->pr_buf, data->pr_buf_len);
+}
+
+CY_DEBUGFS_FOPS(auto_shorts, auto_shorts_debugfs_read, NULL);
+
+static ssize_t opens_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+
+	if (!*ppos)
+		/* Set length to PIP_CMD_MAX_LENGTH to read all */
+		data->pr_buf_len = cyttsp5_run_and_get_selftest_result(
+			data->dad->dev, data->pr_buf, sizeof(data->pr_buf),
+			CY_ST_ID_OPENS, PIP_CMD_MAX_LENGTH, false);
+
+	return simple_read_from_buffer((void __user *)buf, count, ppos,
+			data->pr_buf, data->pr_buf_len);
+}
+
+CY_DEBUGFS_FOPS(opens, opens_debugfs_read, NULL);
+
+static ssize_t cm_panel_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+
+	if (!*ppos)
+		/* Set length to PIP_CMD_MAX_LENGTH to read all */
+		data->pr_buf_len = cyttsp5_run_and_get_selftest_result(
+			data->dad->dev, data->pr_buf, sizeof(data->pr_buf),
+			CY_ST_ID_CM_PANEL, PIP_CMD_MAX_LENGTH, true);
+
+	return simple_read_from_buffer((void __user *)buf, count, ppos,
+			data->pr_buf, data->pr_buf_len);
+}
+
+CY_DEBUGFS_FOPS(cm_panel, cm_panel_debugfs_read, NULL);
+
+static ssize_t cp_panel_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+
+	if (!*ppos)
+		/* Set length to PIP_CMD_MAX_LENGTH to read all */
+		data->pr_buf_len = cyttsp5_run_and_get_selftest_result(
+			data->dad->dev, data->pr_buf, sizeof(data->pr_buf),
+			CY_ST_ID_CP_PANEL, PIP_CMD_MAX_LENGTH, true);
+
+	return simple_read_from_buffer((void __user *)buf, count, ppos,
+			data->pr_buf, data->pr_buf_len);
+}
+
+CY_DEBUGFS_FOPS(cp_panel, cp_panel_debugfs_read, NULL);
+
+static ssize_t cm_button_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+
+	if (!*ppos)
+		/* Set length to PIP_CMD_MAX_LENGTH to read all */
+		data->pr_buf_len = cyttsp5_run_and_get_selftest_result(
+			data->dad->dev, data->pr_buf, sizeof(data->pr_buf),
+			CY_ST_ID_CM_BUTTON, PIP_CMD_MAX_LENGTH, true);
+
+	return simple_read_from_buffer((void __user *)buf, count, ppos,
+			data->pr_buf, data->pr_buf_len);
+}
+
+CY_DEBUGFS_FOPS(cm_button, cm_button_debugfs_read, NULL);
+
+static ssize_t cp_button_debugfs_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_debugfs_data *data = filp->private_data;
+
+	if (!*ppos)
+		/* Set length to PIP_CMD_MAX_LENGTH to read all */
+		data->pr_buf_len = cyttsp5_run_and_get_selftest_result(
+			data->dad->dev, data->pr_buf, sizeof(data->pr_buf),
+			CY_ST_ID_CP_BUTTON, PIP_CMD_MAX_LENGTH, true);
+
+	return simple_read_from_buffer((void __user *)buf, count, ppos,
+			data->pr_buf, data->pr_buf_len);
+}
+
+CY_DEBUGFS_FOPS(cp_button, cp_button_debugfs_read, NULL);
+
+#ifdef TTHE_TUNER_SUPPORT
+static ssize_t tthe_get_panel_data_debugfs_read(struct file *filp,
+		char __user *buf, size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_data *dad = filp->private_data;
+	struct device *dev;
+	struct cyttsp5_core_data *cd;
+	u8 config;
+	u16 actual_read_len;
+	u16 length = 0;
+	u8 element_size = 0;
+	u8 *buf_offset;
+	u8 *buf_out;
+	int elem;
+	int elem_offset = 0;
+	int print_idx = 0;
+	int rc;
+	int i;
+
+	mutex_lock(&dad->debugfs_lock);
+	dev = dad->dev;
+	cd = dev_get_drvdata(dev);
+	buf_out = dad->tthe_get_panel_data_buf;
+	if (!buf_out)
+		goto release_mutex;
+
+	pm_runtime_get_sync(dev);
+
+	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0)
+		goto put_runtime;
+
+	if (dad->heatmap.scan_start) {
+		/*
+		 * To fix CDT206291: avoid multiple scans when
+		 * return data is larger than 4096 bytes in one cycle
+		 */
+		dad->heatmap.scan_start = 0;
+
+		/* Start scan */
+		rc = cyttsp5_exec_scan_cmd_(dev);
+		if (rc < 0)
+			goto release_exclusive;
+	}
+
+	elem = dad->heatmap.num_element;
+
+#if defined(CY_ENABLE_MAX_ELEN)
+	if (elem > CY_MAX_ELEN)
+		rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset,
+			CY_MAX_ELEN, dad->heatmap.data_type, dad->ic_buf,
+			&config, &actual_read_len, NULL);
+	else
+		rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, elem,
+			dad->heatmap.data_type, dad->ic_buf, &config,
+			&actual_read_len, NULL);
+
+#else
+	rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, elem,
+			dad->heatmap.data_type, dad->ic_buf, &config,
+			&actual_read_len, NULL);
+#endif
+	if (rc < 0)
+		goto release_exclusive;
+
+	length = get_unaligned_le16(&dad->ic_buf[0]);
+	buf_offset = dad->ic_buf + length;
+
+	element_size = config & CY_CMD_RET_PANEL_ELMNT_SZ_MASK;
+
+	elem -= actual_read_len;
+	elem_offset = actual_read_len;
+	while (elem > 0) {
+#ifdef CY_ENABLE_MAX_ELEN
+		if (elem > CY_MAX_ELEN)
+			rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset,
+				CY_MAX_ELEN, dad->heatmap.data_type, NULL,
+				&config, &actual_read_len, buf_offset);
+		else
+			rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, elem,
+				dad->heatmap.data_type, NULL, &config,
+				&actual_read_len, buf_offset);
+#else
+
+		rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, elem,
+				dad->heatmap.data_type, NULL, &config,
+				&actual_read_len, buf_offset);
+#endif
+		if (rc < 0)
+			goto release_exclusive;
+
+		if (!actual_read_len)
+			break;
+
+		length += actual_read_len * element_size;
+		buf_offset = dad->ic_buf + length;
+		elem -= actual_read_len;
+		elem_offset += actual_read_len;
+	}
+
+	/* Reconstruct cmd header */
+	put_unaligned_le16(length, &dad->ic_buf[0]);
+	put_unaligned_le16(elem_offset, &dad->ic_buf[7]);
+
+release_exclusive:
+	cmd->release_exclusive(dev);
+put_runtime:
+	pm_runtime_put(dev);
+
+	if (rc < 0)
+		goto release_mutex;
+	if (cd->show_timestamp)
+		print_idx += scnprintf(buf_out, TTHE_TUNER_MAX_BUF,
+			"[%u] CY_DATA:", jiffies_to_msecs(jiffies));
+	else
+		print_idx += scnprintf(buf_out, TTHE_TUNER_MAX_BUF,
+			"CY_DATA:");
+	for (i = 0; i < length; i++)
+		print_idx += scnprintf(buf_out + print_idx,
+				TTHE_TUNER_MAX_BUF - print_idx,
+				"%02X ", dad->ic_buf[i]);
+	print_idx += scnprintf(buf_out + print_idx,
+			TTHE_TUNER_MAX_BUF - print_idx,
+			":(%d bytes)\n", length);
+	rc = simple_read_from_buffer((void __user *)buf, count, ppos,
+			buf_out, print_idx);
+	print_idx = rc;
+
+release_mutex:
+	mutex_unlock(&dad->debugfs_lock);
+	return print_idx;
+}
+
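+/*
+ * tthe get_panel_data write: the parsed command bytes carry the heatmap
+ * parameters, ic_buf[3..4] holds the little-endian element count,
+ * ic_buf[5] the data type, and ic_buf[6] a flag requesting a new scan on
+ * the next read. The element count is clamped so a full read fits in
+ * CY_MAX_PRBUF_SIZE.
+ */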
+static ssize_t tthe_get_panel_data_debugfs_write(struct file *filp,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct cyttsp5_device_access_data *dad = filp->private_data;
+	struct device *dev = dad->dev;
+	ssize_t length;
+	int max_read;
+	u8 *buf_in = dad->tthe_get_panel_data_buf;
+	int ret;
+
+	mutex_lock(&dad->debugfs_lock);
+	/*
+	 * Bound the write so the NUL terminator below stays inside the
+	 * buffer (assumed TTHE_TUNER_MAX_BUF bytes, as used by the read path)
+	 */
+	if (!buf_in || *ppos + count >= TTHE_TUNER_MAX_BUF)
+		goto exit;
+	ret = copy_from_user(buf_in + (*ppos), buf, count);
+	if (ret)
+		goto exit;
+	buf_in[count] = 0;
+
+	length = cyttsp5_ic_parse_input(dev, buf_in, count, dad->ic_buf,
+			CY_MAX_PRBUF_SIZE);
+	if (length <= 0) {
+		dev_err(dev, "%s: %s Group Data store\n", __func__,
+			"Malformed input for");
+		goto exit;
+	}
+
+	/* update parameter value */
+	dad->heatmap.num_element = get_unaligned_le16(&dad->ic_buf[3]);
+	dad->heatmap.data_type = dad->ic_buf[5];
+
+	if (dad->ic_buf[6] > 0)
+		dad->heatmap.scan_start = true;
+	else
+		dad->heatmap.scan_start = false;
+
+	/* elem cannot be bigger than the buffer size */
+	max_read = CY_CMD_RET_PANEL_HDR;
+	max_read += dad->heatmap.num_element * CY_CMD_RET_PANEL_ELMNT_SZ_MAX;
+
+	if (max_read >= CY_MAX_PRBUF_SIZE) {
+		dad->heatmap.num_element =
+			(CY_MAX_PRBUF_SIZE - CY_CMD_RET_PANEL_HDR)
+			/ CY_CMD_RET_PANEL_ELMNT_SZ_MAX;
+		parade_debug(dev, DEBUG_LEVEL_2, "%s: Will get %d element\n",
+			__func__, dad->heatmap.num_element);
+	}
+
+exit:
+	mutex_unlock(&dad->debugfs_lock);
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: return count=%zu\n",
+		__func__, count);
+	return count;
+}
+
+static int tthe_get_panel_data_debugfs_open(struct inode *inode,
+		struct file *filp)
+{
+	struct cyttsp5_device_access_data *dad = inode->i_private;
+
+	mutex_lock(&dad->debugfs_lock);
+
+	if (dad->tthe_get_panel_data_is_open) {
+		mutex_unlock(&dad->debugfs_lock);
+		return -EBUSY;
+	}
+
+	filp->private_data = inode->i_private;
+
+	dad->tthe_get_panel_data_is_open = 1;
+	mutex_unlock(&dad->debugfs_lock);
+	return 0;
+}
+
+static int tthe_get_panel_data_debugfs_close(struct inode *inode,
+		struct file *filp)
+{
+	struct cyttsp5_device_access_data *dad = filp->private_data;
+
+	mutex_lock(&dad->debugfs_lock);
+	filp->private_data = NULL;
+	dad->tthe_get_panel_data_is_open = 0;
+	mutex_unlock(&dad->debugfs_lock);
+
+	return 0;
+}
+
+static const struct file_operations tthe_get_panel_data_fops = {
+	.open = tthe_get_panel_data_debugfs_open,
+	.release = tthe_get_panel_data_debugfs_close,
+	.read = tthe_get_panel_data_debugfs_read,
+	.write = tthe_get_panel_data_debugfs_write,
+};
+#endif
+
+static int cyttsp5_setup_sysfs(struct device *dev)
+{
+	int rc;
+	struct cyttsp5_device_access_data *dad =
+		cyttsp5_get_device_access_data(dev);
+
+	rc = device_create_file(dev, &dev_attr_command);
+	if (rc) {
+		dev_err(dev, "%s: Error, couldn't create command\n", __func__);
+		goto exit;
+	}
+
+	rc = device_create_file(dev, &dev_attr_status);
+	if (rc) {
+		dev_err(dev, "%s: Error, couldn't create status\n", __func__);
+		goto unregister_command;
+	}
+
+	rc = device_create_file(dev, &dev_attr_response);
+	if (rc) {
+		dev_err(dev, "%s: Error, couldn't create response\n", __func__);
+		goto unregister_status;
+	}
+
+	/* debugfs helpers do not return an errno; report failures as -ENODEV */
+	dad->base_dentry = debugfs_create_dir(dev_name(dev), NULL);
+	if (IS_ERR_OR_NULL(dad->base_dentry)) {
+		dev_err(dev, "%s: Error, couldn't create base directory\n",
+			__func__);
+		rc = -ENODEV;
+		goto unregister_response;
+	}
+
+	dad->mfg_test_dentry = debugfs_create_dir("mfg_test",
+			dad->base_dentry);
+	if (IS_ERR_OR_NULL(dad->mfg_test_dentry)) {
+		dev_err(dev, "%s: Error, couldn't create mfg_test directory\n",
+			__func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	if (IS_ERR_OR_NULL(debugfs_create_file("panel_scan", 0600,
+			dad->mfg_test_dentry, dad,
+			&panel_scan_debugfs_fops))) {
+		dev_err(dev, "%s: Error, couldn't create panel_scan\n",
+			__func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	if (IS_ERR_OR_NULL(debugfs_create_file("get_idac", 0600,
+			dad->mfg_test_dentry, dad, &get_idac_debugfs_fops))) {
+		dev_err(dev, "%s: Error, couldn't create get_idac\n", __func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	if (IS_ERR_OR_NULL(debugfs_create_file("auto_shorts", 0400,
+			dad->mfg_test_dentry, dad,
+			&auto_shorts_debugfs_fops))) {
+		dev_err(dev, "%s: Error, couldn't create auto_shorts\n",
+			__func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	if (IS_ERR_OR_NULL(debugfs_create_file("opens", 0400,
+			dad->mfg_test_dentry, dad, &opens_debugfs_fops))) {
+		dev_err(dev, "%s: Error, couldn't create opens\n", __func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	if (IS_ERR_OR_NULL(debugfs_create_file("calibrate", 0600,
+			dad->mfg_test_dentry, dad, &calibrate_debugfs_fops))) {
+		dev_err(dev, "%s: Error, couldn't create calibrate\n",
+			__func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	if (IS_ERR_OR_NULL(debugfs_create_file("baseline", 0600,
+			dad->mfg_test_dentry, dad, &baseline_debugfs_fops))) {
+		dev_err(dev, "%s: Error, couldn't create baseline\n", __func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	if (IS_ERR_OR_NULL(debugfs_create_file("cm_panel", 0400,
+			dad->mfg_test_dentry, dad, &cm_panel_debugfs_fops))) {
+		dev_err(dev, "%s: Error, couldn't create cm_panel\n", __func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	if (IS_ERR_OR_NULL(debugfs_create_file("cp_panel", 0400,
+			dad->mfg_test_dentry, dad, &cp_panel_debugfs_fops))) {
+		dev_err(dev, "%s: Error, couldn't create cp_panel\n", __func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	if (IS_ERR_OR_NULL(debugfs_create_file("cm_button", 0400,
+			dad->mfg_test_dentry, dad, &cm_button_debugfs_fops))) {
+		dev_err(dev, "%s: Error, couldn't create cm_button\n",
+			__func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	if (IS_ERR_OR_NULL(debugfs_create_file("cp_button", 0400,
+			dad->mfg_test_dentry, dad, &cp_button_debugfs_fops))) {
+		dev_err(dev, "%s: Error, couldn't create cp_button\n",
+			__func__);
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+	dad->cmcp_results_debugfs = debugfs_create_file("cmcp_results", 0644,
+		dad->mfg_test_dentry, dad, &cmcp_results_debugfs_fops);
+	if (IS_ERR_OR_NULL(dad->cmcp_results_debugfs)) {
+		dev_err(dev, "%s: Error, couldn't create cmcp_results\n",
+			__func__);
+		dad->cmcp_results_debugfs = NULL;
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+
+#ifdef TTHE_TUNER_SUPPORT
+	dad->tthe_get_panel_data_debugfs = debugfs_create_file(
+			CYTTSP5_TTHE_TUNER_GET_PANEL_DATA_FILE_NAME,
+			0644, NULL, dad, &tthe_get_panel_data_fops);
+	if (IS_ERR_OR_NULL(dad->tthe_get_panel_data_debugfs)) {
+		dev_err(dev, "%s: Error, couldn't create get_panel_data\n",
+			__func__);
+		dad->tthe_get_panel_data_debugfs = NULL;
+		rc = -ENODEV;
+		goto unregister_base_dir;
+	}
+#endif
+
+	rc = device_create_file(dev, &dev_attr_cmcp_test);
+	if (rc) {
+		dev_err(dev, "%s: Error, couldn't create cmcp_test\n",
+			__func__);
+		goto unregister_base_dir;
+	}
+
+	rc = device_create_file(dev, &dev_attr_cmcp_threshold_loading);
+	if (rc) {
+		dev_err(dev,
+			"%s: Error, couldn't create cmcp_thresold_loading\n",
+			__func__);
+		goto unregister_cmcp_test;
+	}
+
+	rc = device_create_bin_file(dev, &bin_attr_cmcp_threshold_data);
+	if (rc) {
+		dev_err(dev, "%s: Error, couldn't create cmcp_thresold_data\n",
+			__func__);
+		goto unregister_cmcp_threshold_loading;
+	}
+
+	dad->sysfs_nodes_created = true;
+	return rc;
+
+unregister_cmcp_threshold_loading:
+	device_remove_file(dev, &dev_attr_cmcp_threshold_loading);
+unregister_cmcp_test:
+	device_remove_file(dev, &dev_attr_cmcp_test);
+unregister_base_dir:
+	debugfs_remove_recursive(dad->base_dentry);
+unregister_response:
+	device_remove_file(dev, &dev_attr_response);
+unregister_status:
+	device_remove_file(dev, &dev_attr_status);
+unregister_command:
+	device_remove_file(dev, &dev_attr_command);
+exit:
+	return rc;
+}
+
+static int cyttsp5_setup_sysfs_attention(struct device *dev)
+{
+	struct cyttsp5_device_access_data *dad
+		= cyttsp5_get_device_access_data(dev);
+	int rc = 0;
+
+	dad->si = cmd->request_sysinfo(dev);
+	if (!dad->si)
+		return -EINVAL;
+
+	rc = cyttsp5_setup_sysfs(dev);
+
+	cmd->unsubscribe_attention(dev, CY_ATTEN_STARTUP,
+		CYTTSP5_DEVICE_ACCESS_NAME, cyttsp5_setup_sysfs_attention, 0);
+
+	return rc;
+}
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICE_ACCESS_API
+int cyttsp5_device_access_user_command(const char *core_name, u16 read_len,
+		u8 *read_buf, u16 write_len, u8 *write_buf,
+		u16 *actual_read_len)
+{
+	struct cyttsp5_core_data *cd;
+	int rc;
+
+	might_sleep();
+
+	/* Check parameters */
+	if (!read_buf || !write_buf || !actual_read_len)
+		return -EINVAL;
+
+	if (!core_name)
+		core_name = CY_DEFAULT_CORE_ID;
+
+	/* Find device */
+	cd = cyttsp5_get_core_data((char *)core_name);
+	if (!cd) {
+		pr_err("%s: No device.\n", __func__);
+		return -ENODEV;
+	}
+
+	pm_runtime_get_sync(cd->dev);
+	rc = cmd->nonhid_cmd->user_cmd(cd->dev, 1, read_len, read_buf,
+			write_len, write_buf, actual_read_len);
+	pm_runtime_put(cd->dev);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(cyttsp5_device_access_user_command);
+
+struct command_work {
+	struct work_struct work;
+	const char *core_name;
+	u16 read_len;
+	u8 *read_buf;
+	u16 write_len;
+	u8 *write_buf;
+
+	void (*cont)(const char *core_name, u16 read_len, u8 *read_buf,
+		u16 write_len, u8 *write_buf, u16 actual_read_length,
+		int rc);
+};
+
+static void cyttsp5_device_access_user_command_work_func(
+		struct work_struct *work)
+{
+	struct command_work *cmd_work =
+			container_of(work, struct command_work, work);
+	u16 actual_read_length;
+	int rc;
+
+	rc = cyttsp5_device_access_user_command(cmd_work->core_name,
+			cmd_work->read_len, cmd_work->read_buf,
+			cmd_work->write_len, cmd_work->write_buf,
+			&actual_read_length);
+
+	if (cmd_work->cont)
+		cmd_work->cont(cmd_work->core_name,
+			cmd_work->read_len, cmd_work->read_buf,
+			cmd_work->write_len, cmd_work->write_buf,
+			actual_read_length, rc);
+
+	kfree(cmd_work);
+}
+
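+/*
+ * Asynchronous variant of cyttsp5_device_access_user_command: the command
+ * is issued from the system workqueue, so the caller's read/write buffers
+ * must stay valid until the optional cont() callback runs (cont may be
+ * NULL, in which case completion is fire-and-forget). The work item is
+ * allocated with GFP_ATOMIC so this can be called from atomic context.
+ */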
+int cyttsp5_device_access_user_command_async(const char *core_name,
+		u16 read_len, u8 *read_buf, u16 write_len, u8 *write_buf,
+		void (*cont)(const char *core_name, u16 read_len, u8 *read_buf,
+			u16 write_len, u8 *write_buf, u16 actual_read_length,
+			int rc))
+{
+	struct command_work *cmd_work;
+
+	cmd_work = kzalloc(sizeof(*cmd_work), GFP_ATOMIC);
+	if (!cmd_work)
+		return -ENOMEM;
+
+	cmd_work->core_name = core_name;
+	cmd_work->read_len = read_len;
+	cmd_work->read_buf = read_buf;
+	cmd_work->write_len = write_len;
+	cmd_work->write_buf = write_buf;
+	cmd_work->cont = cont;
+
+	INIT_WORK(&cmd_work->work,
+			cyttsp5_device_access_user_command_work_func);
+	schedule_work(&cmd_work->work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cyttsp5_device_access_user_command_async);
+#endif
+
+static void cyttsp5_cmcp_parse_threshold_file(const struct firmware *fw,
+		void *context)
+{
+	struct device *dev = context;
+	struct cyttsp5_device_access_data *dad =
+		cyttsp5_get_device_access_data(dev);
+
+	if (!fw) {
+		dev_info(dev, "%s: No builtin cmcp threshold file\n", __func__);
+		goto exit;
+	}
+
+	if (!fw->data || !fw->size) {
+		dev_err(dev, "%s: Invalid builtin cmcp threshold file\n",
+		__func__);
+		goto exit;
+	}
+
+	parade_debug(dev, DEBUG_LEVEL_1, "%s: Found cmcp threshold file.\n",
+		__func__);
+
+	cyttsp5_parse_cmcp_threshold_file_common(dev, &fw->data[0], fw->size);
+
+	dad->builtin_cmcp_threshold_status = 0;
+	/* The parser copies everything it needs, so drop the firmware here */
+	release_firmware(fw);
+	complete(&dad->builtin_cmcp_threshold_complete);
+	return;
+
+exit:
+	release_firmware(fw);
+
+	dad->builtin_cmcp_threshold_status = -EINVAL;
+	complete(&dad->builtin_cmcp_threshold_complete);
+}
+
+static void cyttsp5_parse_cmcp_threshold_builtin(
+	struct work_struct *cmcp_threshold_update)
+{
+	struct cyttsp5_device_access_data *dad =
+		container_of(cmcp_threshold_update,
+		struct cyttsp5_device_access_data,
+		cmcp_threshold_update);
+	struct device *dev = dad->dev;
+	int retval;
+
+	dad->si = cmd->request_sysinfo(dev);
+	if (!dad->si) {
+		dev_err(dev, "%s: Fail get sysinfo pointer from core\n",
+			__func__);
+		return;
+	}
+
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Enabling cmcp threshold class loader built-in\n",
+		__func__);
+
+	/* Open threshold file */
+	retval = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+			CMCP_THRESHOLD_FILE_NAME, dev, GFP_KERNEL, dev,
+			cyttsp5_cmcp_parse_threshold_file);
+	if (retval < 0) {
+		dev_err(dev,
+			"%s: Failed loading cmcp threshold file\n", __func__);
+		/* Try legacy file name */
+		retval = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+				CY_CMCP_THRESHOLD_FILE_NAME, dev, GFP_KERNEL,
+				dev, cyttsp5_cmcp_parse_threshold_file);
+		if (retval < 0) {
+			dev_err(dev,
+				"%s: Fail request cmcp threshold class file\n",
+				__func__);
+			goto exit;
+		}
+	}
+
+	/* wait until cmcp threshold update finishes */
+	wait_for_completion(&dad->builtin_cmcp_threshold_complete);
+
+	retval = dad->builtin_cmcp_threshold_status;
+	if (retval < 0)
+		dev_err(dev, "%s: Builtin cmcp threshold update failed r=%d\n",
+			__func__, retval);
+
+exit:
+	return;
+}
+
+static int cyttsp5_device_access_probe(struct device *dev, void **data)
+{
+	struct cyttsp5_device_access_data *dad;
+	struct configuration *configurations;
+	struct cmcp_data *cmcp_info;
+	struct result *result;
+
+	int tx_num = MAX_TX_SENSORS;
+	int rx_num = MAX_RX_SENSORS;
+	int btn_num = MAX_BUTTONS;
+
+	struct test_case_field *test_case_field_array;
+	struct test_case_search *test_case_search_array;
+	int rc = 0;
+
+	dad = kzalloc(sizeof(*dad), GFP_KERNEL);
+	if (!dad) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_data_failed;
+	}
+
+	configurations = kzalloc(sizeof(*configurations), GFP_KERNEL);
+	if (!configurations) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_configs_failed;
+	}
+	dad->configs = configurations;
+
+	cmcp_info = kzalloc(sizeof(*cmcp_info), GFP_KERNEL);
+	if (!cmcp_info) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_cmcp_info_failed;
+	}
+	dad->cmcp_info = cmcp_info;
+
+	cmcp_info->tx_num = tx_num;
+	cmcp_info->rx_num = rx_num;
+	cmcp_info->btn_num = btn_num;
+
+	result = kzalloc(sizeof(*result), GFP_KERNEL);
+	if (!result) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_result_failed;
+	}
+	dad->result = result;
+
+	test_case_field_array = kcalloc(MAX_CASE_NUM,
+			sizeof(*test_case_field_array), GFP_KERNEL);
+	if (!test_case_field_array) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_field_array_failed;
+	}
+
+	test_case_search_array = kcalloc(MAX_CASE_NUM,
+			sizeof(*test_case_search_array), GFP_KERNEL);
+	if (!test_case_search_array) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_search_array_failed;
+	}
+
+	cmcp_info->gd_sensor_col = kcalloc(tx_num, sizeof(struct gd_sensor),
+			GFP_KERNEL);
+	if (cmcp_info->gd_sensor_col == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_gd_sensor_col_failed;
+	}
+
+	cmcp_info->gd_sensor_row = kcalloc(rx_num, sizeof(struct gd_sensor),
+			GFP_KERNEL);
+	if (cmcp_info->gd_sensor_row == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_gd_sensor_row_failed;
+	}
+
+	cmcp_info->cm_data_panel =
+		kcalloc(tx_num * rx_num + 1, sizeof(int32_t), GFP_KERNEL);
+	if (cmcp_info->cm_data_panel == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_cm_data_panel_failed;
+	}
+
+	cmcp_info->cp_tx_data_panel = kcalloc(tx_num, sizeof(int32_t),
+			GFP_KERNEL);
+	if (cmcp_info->cp_tx_data_panel == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_cp_tx_data_panel_failed;
+	}
+
+	cmcp_info->cp_tx_cal_data_panel = kcalloc(tx_num, sizeof(int32_t),
+			GFP_KERNEL);
+	if (cmcp_info->cp_tx_cal_data_panel == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_cp_tx_cal_data_panel_failed;
+	}
+
+	cmcp_info->cp_rx_data_panel = kcalloc(rx_num, sizeof(int32_t),
+			GFP_KERNEL);
+	if (cmcp_info->cp_rx_data_panel == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_cp_rx_data_panel_failed;
+	}
+
+	cmcp_info->cp_rx_cal_data_panel = kcalloc(rx_num, sizeof(int32_t),
+			GFP_KERNEL);
+	if (cmcp_info->cp_rx_cal_data_panel == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_cp_rx_cal_data_panel_failed;
+	}
+
+	cmcp_info->cm_btn_data = kcalloc(btn_num, sizeof(int32_t), GFP_KERNEL);
+	if (cmcp_info->cm_btn_data == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_cm_btn_data_failed;
+	}
+
+	cmcp_info->cp_btn_data = kcalloc(btn_num, sizeof(int32_t), GFP_KERNEL);
+	if (cmcp_info->cp_btn_data == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_cp_btn_data_failed;
+	}
+
+	cmcp_info->cm_sensor_column_delta =
+		kcalloc(rx_num * tx_num, sizeof(int32_t), GFP_KERNEL);
+	if (cmcp_info->cm_sensor_column_delta == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_cm_sensor_column_delta_failed;
+	}
+
+	cmcp_info->cm_sensor_row_delta =
+		kcalloc(tx_num * rx_num, sizeof(int32_t), GFP_KERNEL);
+	if (cmcp_info->cm_sensor_row_delta == NULL) {
+		rc = -ENOMEM;
+		goto cyttsp5_device_access_probe_cm_sensor_row_delta_failed;
+	}
+
+	mutex_init(&dad->sysfs_lock);
+	mutex_init(&dad->cmcp_threshold_lock);
+	dad->dev = dev;
+#ifdef TTHE_TUNER_SUPPORT
+	mutex_init(&dad->debugfs_lock);
+	dad->heatmap.num_element = 200;
+#endif
+	*data = dad;
+
+	dad->test_field_array = test_case_field_array;
+	dad->test_search_array = test_case_search_array;
+	dad->test_executed = 0;
+
+	init_completion(&dad->builtin_cmcp_threshold_complete);
+
+	/* get sysinfo */
+	dad->si = cmd->request_sysinfo(dev);
+	if (dad->si) {
+		rc = cyttsp5_setup_sysfs(dev);
+		if (rc)
+			goto cyttsp5_device_access_setup_sysfs_failed;
+	} else {
+		dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n",
+			__func__, dad->si);
+		cmd->subscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_DEVICE_ACCESS_NAME,
+			cyttsp5_setup_sysfs_attention, 0);
+	}
+
+	INIT_WORK(&dad->cmcp_threshold_update,
+		cyttsp5_parse_cmcp_threshold_builtin);
+	schedule_work(&dad->cmcp_threshold_update);
+
+	return 0;
+
+cyttsp5_device_access_setup_sysfs_failed:
+	kfree(cmcp_info->cm_sensor_row_delta);
+cyttsp5_device_access_probe_cm_sensor_row_delta_failed:
+	kfree(cmcp_info->cm_sensor_column_delta);
+cyttsp5_device_access_probe_cm_sensor_column_delta_failed:
+	kfree(cmcp_info->cp_btn_data);
+cyttsp5_device_access_probe_cp_btn_data_failed:
+	kfree(cmcp_info->cm_btn_data);
+cyttsp5_device_access_probe_cm_btn_data_failed:
+	kfree(cmcp_info->cp_rx_cal_data_panel);
+cyttsp5_device_access_probe_cp_rx_cal_data_panel_failed:
+	kfree(cmcp_info->cp_rx_data_panel);
+cyttsp5_device_access_probe_cp_rx_data_panel_failed:
+	kfree(cmcp_info->cp_tx_cal_data_panel);
+cyttsp5_device_access_probe_cp_tx_cal_data_panel_failed:
+	kfree(cmcp_info->cp_tx_data_panel);
+cyttsp5_device_access_probe_cp_tx_data_panel_failed:
+	kfree(cmcp_info->cm_data_panel);
+cyttsp5_device_access_probe_cm_data_panel_failed:
+	kfree(cmcp_info->gd_sensor_row);
+cyttsp5_device_access_probe_gd_sensor_row_failed:
+	kfree(cmcp_info->gd_sensor_col);
+cyttsp5_device_access_probe_gd_sensor_col_failed:
+	kfree(test_case_search_array);
+cyttsp5_device_access_probe_search_array_failed:
+	kfree(test_case_field_array);
+cyttsp5_device_access_probe_field_array_failed:
+	kfree(result);
+cyttsp5_device_access_probe_result_failed:
+	kfree(cmcp_info);
+cyttsp5_device_access_probe_cmcp_info_failed:
+	kfree(configurations);
+cyttsp5_device_access_probe_configs_failed:
+	kfree(dad);
+cyttsp5_device_access_probe_data_failed:
+	dev_err(dev, "%s failed.\n", __func__);
+	return rc;
+}
+
+static void cyttsp5_device_access_release(struct device *dev, void *data)
+{
+	struct cyttsp5_device_access_data *dad = data;
+
+	if (dad->sysfs_nodes_created) {
+		device_remove_file(dev, &dev_attr_command);
+		device_remove_file(dev, &dev_attr_status);
+		device_remove_file(dev, &dev_attr_response);
+		debugfs_remove(dad->cmcp_results_debugfs);
+		debugfs_remove_recursive(dad->base_dentry);
+#ifdef TTHE_TUNER_SUPPORT
+		debugfs_remove(dad->tthe_get_panel_data_debugfs);
+#endif
+		device_remove_file(dev, &dev_attr_cmcp_test);
+		device_remove_file(dev, &dev_attr_cmcp_threshold_loading);
+		device_remove_file(dev, &dev_attr_csv_loading);
+		device_remove_bin_file(dev, &bin_attr_cmcp_threshold_data);
+		kfree(dad->cmcp_threshold_data);
+	} else
+		cmd->unsubscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_DEVICE_ACCESS_NAME,
+			cyttsp5_setup_sysfs_attention, 0);
+
+	kfree(dad->test_search_array);
+	kfree(dad->test_field_array);
+	kfree(dad->configs);
+	cyttsp5_free_cmcp_buf(dad->cmcp_info);
+	kfree(dad->cmcp_info);
+	kfree(dad->result);
+	kfree(dad);
+}
+
+static struct cyttsp5_module device_access_module = {
+	.name = CYTTSP5_DEVICE_ACCESS_NAME,
+	.probe = cyttsp5_device_access_probe,
+	.release = cyttsp5_device_access_release,
+};
+
+static int __init cyttsp5_device_access_init(void)
+{
+	int rc;
+
+	cmd = cyttsp5_get_commands();
+	if (!cmd)
+		return -EINVAL;
+
+	rc = cyttsp5_register_module(&device_access_module);
+	if (rc < 0) {
+		pr_err("%s: Error, failed registering module\n", __func__);
+		return rc;
+	}
+
+	pr_info("%s: Parade TTSP Device Access Driver (Built %s) rc=%d\n",
+		__func__, CY_DRIVER_VERSION, rc);
+	return 0;
+}
+module_init(cyttsp5_device_access_init);
+
+static void __exit cyttsp5_device_access_exit(void)
+{
+	cyttsp5_unregister_module(&device_access_module);
+}
+module_exit(cyttsp5_device_access_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parade TrueTouch(R) Standard Product Device Access Driver");
+MODULE_AUTHOR("Parade Technologies <ttdrivers@paradetech.com>");
+
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_devtree.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_devtree.c
new file mode 100644
index 0000000..783082a
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_devtree.c
@@ -0,0 +1,753 @@
+/*
+ * cyttsp5_devtree.c
+ * Parade TrueTouch(TM) Standard Product V5 Device Tree Support Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2013-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+/* cyttsp */
+#include "cyttsp5_regs.h"
+#include <linux/input/cyttsp5_platform.h>
+
+#define ENABLE_VIRTUAL_KEYS
+
+#define MAX_NAME_LENGTH 64
+
+enum cyttsp5_device_type {
+	DEVICE_MT,
+	DEVICE_BTN,
+	DEVICE_PROXIMITY,
+	DEVICE_TYPE_MAX,
+};
+
+struct cyttsp5_device_pdata_func {
+	void * (*create_and_get_pdata)(struct device_node *);
+	void (*free_pdata)(void *);
+};
+
+struct cyttsp5_pdata_ptr {
+	void **pdata;
+};
+
+#ifdef ENABLE_VIRTUAL_KEYS
+static struct kobject *board_properties_kobj;
+
+struct cyttsp5_virtual_keys {
+	struct kobj_attribute kobj_attr;
+	u16 *data;
+	int size;
+};
+#endif
+
+struct cyttsp5_extended_mt_platform_data {
+	struct cyttsp5_mt_platform_data pdata;
+#ifdef ENABLE_VIRTUAL_KEYS
+	struct cyttsp5_virtual_keys vkeys;
+#endif
+};
+
+static inline int get_inp_dev_name(struct device_node *dev_node,
+		const char **inp_dev_name)
+{
+	return of_property_read_string(dev_node, "cy,inp_dev_name",
+			inp_dev_name);
+}
+
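+/*
+ * Read an array property from the device tree. Each 32-bit cell is
+ * truncated to 16 bits for storage. Returns NULL when the property is
+ * absent, an ERR_PTR on allocation failure, and a kcalloc'd array (to be
+ * freed by the caller) otherwise.
+ */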
+static s16 *create_and_get_u16_array(struct device_node *dev_node,
+		const char *name, int *size)
+{
+	const __be32 *values;
+	s16 *val_array;
+	int len;
+	int sz;
+	int rc;
+	int i;
+
+	values = of_get_property(dev_node, name, &len);
+	if (values == NULL)
+		return NULL;
+
+	sz = len / sizeof(u32);
+	pr_debug("%s: %s size:%d\n", __func__, name, sz);
+
+	val_array = kcalloc(sz, sizeof(s16), GFP_KERNEL);
+	if (!val_array) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < sz; i++)
+		val_array[i] = (s16)be32_to_cpup(values++);
+
+	*size = sz;
+
+	return val_array;
+
+fail:
+	return ERR_PTR(rc);
+}
+
+static struct touch_framework *create_and_get_touch_framework(
+		struct device_node *dev_node)
+{
+	struct touch_framework *frmwrk;
+	s16 *abs;
+	int size;
+	int rc;
+
+	abs = create_and_get_u16_array(dev_node, "cy,abs", &size);
+	if (IS_ERR_OR_NULL(abs))
+		return (void *)abs;
+
+	/* Check for valid abs size */
+	if (size % CY_NUM_ABS_SET) {
+		rc = -EINVAL;
+		goto fail_free_abs;
+	}
+
+	frmwrk = kzalloc(sizeof(*frmwrk), GFP_KERNEL);
+	if (!frmwrk) {
+		rc = -ENOMEM;
+		goto fail_free_abs;
+	}
+
+	frmwrk->abs = abs;
+	frmwrk->size = size;
+
+	return frmwrk;
+
+fail_free_abs:
+	kfree(abs);
+
+	return ERR_PTR(rc);
+}
+
+static void free_touch_framework(struct touch_framework *frmwrk)
+{
+	kfree(frmwrk->abs);
+	kfree(frmwrk);
+}
+
+#ifdef ENABLE_VIRTUAL_KEYS
+#define VIRTUAL_KEY_ELEMENT_SIZE 5
+
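+/*
+ * Each virtual key in the "cy,virtual_keys" property takes five cells;
+ * they are exported verbatim through the Android board_properties
+ * convention as "0x01:<code>:<x>:<y>:<w>:<h>" lines (typically keycode,
+ * center x/y, width, height).
+ */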
+static ssize_t virtual_keys_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct cyttsp5_virtual_keys *vkeys = container_of(attr,
+		struct cyttsp5_virtual_keys, kobj_attr);
+	u16 *data = vkeys->data;
+	int size = vkeys->size;
+	int index;
+	int i;
+
+	index = 0;
+	for (i = 0; i < size; i += VIRTUAL_KEY_ELEMENT_SIZE)
+		index += scnprintf(buf + index, CY_MAX_PRBUF_SIZE - index,
+			"0x01:%d:%d:%d:%d:%d\n",
+			data[i], data[i+1], data[i+2], data[i+3], data[i+4]);
+
+	return index;
+}
+
+static int setup_virtual_keys(struct device_node *dev_node,
+		const char *inp_dev_name, struct cyttsp5_virtual_keys *vkeys)
+{
+	char *name;
+	u16 *data;
+	int size;
+	int rc;
+
+	data = create_and_get_u16_array(dev_node, "cy,virtual_keys", &size);
+	if (data == NULL)
+		return 0;
+	else if (IS_ERR(data)) {
+		rc = PTR_ERR(data);
+		goto fail;
+	}
+
+	/* Check for valid virtual keys size */
+	if (size % VIRTUAL_KEY_ELEMENT_SIZE) {
+		rc = -EINVAL;
+		goto fail_free_data;
+	}
+
+	name = kzalloc(MAX_NAME_LENGTH, GFP_KERNEL);
+	if (!name) {
+		rc = -ENOMEM;
+		goto fail_free_data;
+	}
+
+	snprintf(name, MAX_NAME_LENGTH, "virtualkeys.%s", inp_dev_name);
+
+	vkeys->data = data;
+	vkeys->size = size;
+
+	/* TODO: Instantiate in board file and export it */
+	if (board_properties_kobj == NULL)
+		board_properties_kobj =
+			kobject_create_and_add("board_properties", NULL);
+	if (board_properties_kobj == NULL) {
+		pr_err("%s: Cannot get board_properties kobject!\n", __func__);
+		rc = -EINVAL;
+		goto fail_free_name;
+	}
+
+	/* Initialize dynamic SysFs attribute */
+	sysfs_attr_init(&vkeys->kobj_attr.attr);
+	vkeys->kobj_attr.attr.name = name;
+	vkeys->kobj_attr.attr.mode = 0444;
+	vkeys->kobj_attr.show = virtual_keys_show;
+
+	rc = sysfs_create_file(board_properties_kobj, &vkeys->kobj_attr.attr);
+	if (rc)
+		goto fail_del_kobj;
+
+	return 0;
+
+fail_del_kobj:
+	kobject_put(board_properties_kobj);
+	board_properties_kobj = NULL;
+fail_free_name:
+	kfree(name);
+	vkeys->kobj_attr.attr.name = NULL;
+fail_free_data:
+	kfree(data);
+	vkeys->data = NULL;
+fail:
+	return rc;
+}
+
+static void free_virtual_keys(struct cyttsp5_virtual_keys *vkeys)
+{
+	if (board_properties_kobj) {
+		sysfs_remove_file(board_properties_kobj,
+			&vkeys->kobj_attr.attr);
+		kobject_put(board_properties_kobj);
+		board_properties_kobj = NULL;
+	}
+
+	kfree(vkeys->data);
+	kfree(vkeys->kobj_attr.attr.name);
+}
+#endif
+
+static void *create_and_get_mt_pdata(struct device_node *dev_node)
+{
+	struct cyttsp5_extended_mt_platform_data *ext_pdata;
+	struct cyttsp5_mt_platform_data *pdata;
+	u32 value;
+	int rc;
+
+	ext_pdata = kzalloc(sizeof(*ext_pdata), GFP_KERNEL);
+	if (!ext_pdata) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	pdata = &ext_pdata->pdata;
+
+	rc = get_inp_dev_name(dev_node, &pdata->inp_dev_name);
+	if (rc)
+		goto fail_free_pdata;
+
+	/* Optional fields */
+	rc = of_property_read_u32(dev_node, "cy,flags", &value);
+	if (!rc)
+		pdata->flags = value;
+
+	rc = of_property_read_u32(dev_node, "cy,vkeys_x", &value);
+	if (!rc)
+		pdata->vkeys_x = value;
+
+	rc = of_property_read_u32(dev_node, "cy,vkeys_y", &value);
+	if (!rc)
+		pdata->vkeys_y = value;
+
+	/* Required fields */
+	pdata->frmwrk = create_and_get_touch_framework(dev_node);
+	if (pdata->frmwrk == NULL) {
+		rc = -EINVAL;
+		goto fail_free_pdata;
+	} else if (IS_ERR(pdata->frmwrk)) {
+		rc = PTR_ERR(pdata->frmwrk);
+		goto fail_free_pdata;
+	}
+#ifdef ENABLE_VIRTUAL_KEYS
+	rc = setup_virtual_keys(dev_node, pdata->inp_dev_name,
+			&ext_pdata->vkeys);
+	if (rc) {
+		pr_err("%s: Cannot setup virtual keys!\n", __func__);
+		goto fail_free_all;
+	}
+#endif
+	return pdata;
+
+fail_free_all:
+	kfree(ext_pdata->vkeys.kobj_attr.attr.name);
+	kfree(pdata->frmwrk);
+fail_free_pdata:
+	kfree(ext_pdata);
+fail:
+	return ERR_PTR(rc);
+}
+
+static void free_mt_pdata(void *pdata)
+{
+	struct cyttsp5_mt_platform_data *mt_pdata =
+		(struct cyttsp5_mt_platform_data *)pdata;
+	struct cyttsp5_extended_mt_platform_data *ext_mt_pdata =
+		container_of(mt_pdata,
+			struct cyttsp5_extended_mt_platform_data, pdata);
+
+	free_touch_framework(mt_pdata->frmwrk);
+#ifdef ENABLE_VIRTUAL_KEYS
+	free_virtual_keys(&ext_mt_pdata->vkeys);
+#endif
+	kfree(ext_mt_pdata);
+}
+
+static void *create_and_get_btn_pdata(struct device_node *dev_node)
+{
+	struct cyttsp5_btn_platform_data *pdata;
+	int rc;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	rc = get_inp_dev_name(dev_node, &pdata->inp_dev_name);
+	if (rc)
+		goto fail_free_pdata;
+
+	return pdata;
+
+fail_free_pdata:
+	kfree(pdata);
+fail:
+	return ERR_PTR(rc);
+}
+
+static void free_btn_pdata(void *pdata)
+{
+	struct cyttsp5_btn_platform_data *btn_pdata =
+		(struct cyttsp5_btn_platform_data *)pdata;
+
+	kfree(btn_pdata);
+}
+
+static void *create_and_get_proximity_pdata(struct device_node *dev_node)
+{
+	struct cyttsp5_proximity_platform_data *pdata;
+	int rc;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	rc = get_inp_dev_name(dev_node, &pdata->inp_dev_name);
+	if (rc)
+		goto fail_free_pdata;
+
+	pdata->frmwrk = create_and_get_touch_framework(dev_node);
+	if (pdata->frmwrk == NULL) {
+		rc = -EINVAL;
+		goto fail_free_pdata;
+	} else if (IS_ERR(pdata->frmwrk)) {
+		rc = PTR_ERR(pdata->frmwrk);
+		goto fail_free_pdata;
+	}
+
+	return pdata;
+
+fail_free_pdata:
+	kfree(pdata);
+fail:
+	return ERR_PTR(rc);
+}
+
+static void free_proximity_pdata(void *pdata)
+{
+	struct cyttsp5_proximity_platform_data *proximity_pdata =
+		(struct cyttsp5_proximity_platform_data *)pdata;
+
+	free_touch_framework(proximity_pdata->frmwrk);
+
+	kfree(proximity_pdata);
+}
+
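+/* Constructor/destructor pairs for each child device type, indexed by
+ * enum cyttsp5_device_type
+ */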
+static struct cyttsp5_device_pdata_func device_pdata_funcs[DEVICE_TYPE_MAX] = {
+	[DEVICE_MT] = {
+		.create_and_get_pdata = create_and_get_mt_pdata,
+		.free_pdata = free_mt_pdata,
+	},
+	[DEVICE_BTN] = {
+		.create_and_get_pdata = create_and_get_btn_pdata,
+		.free_pdata = free_btn_pdata,
+	},
+	[DEVICE_PROXIMITY] = {
+		.create_and_get_pdata = create_and_get_proximity_pdata,
+		.free_pdata = free_proximity_pdata,
+	},
+};
+
+static struct cyttsp5_pdata_ptr pdata_ptr[DEVICE_TYPE_MAX];
+
+static const char *device_names[DEVICE_TYPE_MAX] = {
+	[DEVICE_MT] = "cy,mt",
+	[DEVICE_BTN] = "cy,btn",
+	[DEVICE_PROXIMITY] = "cy,proximity",
+};
+
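+/* Point each pdata_ptr slot at the matching field of the platform data so
+ * device pdata can be created and freed generically by device type
+ */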
+static void set_pdata_ptr(struct cyttsp5_platform_data *pdata)
+{
+	pdata_ptr[DEVICE_MT].pdata = (void **)&pdata->mt_pdata;
+	pdata_ptr[DEVICE_BTN].pdata = (void **)&pdata->btn_pdata;
+	pdata_ptr[DEVICE_PROXIMITY].pdata = (void **)&pdata->prox_pdata;
+}
+
+static int get_device_type(struct device_node *dev_node,
+		enum cyttsp5_device_type *type)
+{
+	const char *name;
+	enum cyttsp5_device_type t;
+	int rc;
+
+	rc = of_property_read_string(dev_node, "name", &name);
+	if (rc)
+		return rc;
+
+	for (t = 0; t < DEVICE_TYPE_MAX; t++)
+		if (!strncmp(name, device_names[t], MAX_NAME_LENGTH)) {
+			*type = t;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static inline void *create_and_get_device_pdata(struct device_node *dev_node,
+		enum cyttsp5_device_type type)
+{
+	return device_pdata_funcs[type].create_and_get_pdata(dev_node);
+}
+
+static inline void free_device_pdata(enum cyttsp5_device_type type)
+{
+	device_pdata_funcs[type].free_pdata(*pdata_ptr[type].pdata);
+}
+
+static struct touch_settings *create_and_get_touch_setting(
+		struct device_node *core_node, const char *name)
+{
+	struct touch_settings *setting;
+	char *tag_name;
+	u32 tag_value;
+	u16 *data;
+	int size;
+	int rc;
+
+	data = create_and_get_u16_array(core_node, name, &size);
+	if (IS_ERR_OR_NULL(data))
+		return (void *)data;
+
+	pr_debug("%s: Touch setting:'%s' size:%d\n", __func__, name, size);
+
+	setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+	if (!setting) {
+		rc = -ENOMEM;
+		goto fail_free_data;
+	}
+
+	setting->data = (u8 *)data;
+	setting->size = size;
+
+	tag_name = kzalloc(MAX_NAME_LENGTH, GFP_KERNEL);
+	if (!tag_name) {
+		rc = -ENOMEM;
+		goto fail_free_setting;
+	}
+
+	snprintf(tag_name, MAX_NAME_LENGTH, "%s-tag", name);
+
+	rc = of_property_read_u32(core_node, tag_name, &tag_value);
+	if (!rc)
+		setting->tag = tag_value;
+
+	kfree(tag_name);
+
+	return setting;
+
+fail_free_setting:
+	kfree(setting);
+fail_free_data:
+	kfree(data);
+
+	return ERR_PTR(rc);
+}
+
+static void free_touch_setting(struct touch_settings *setting)
+{
+	if (setting) {
+		kfree(setting->data);
+		kfree(setting);
+	}
+}
+
+static char *touch_setting_names[CY_IC_GRPNUM_NUM] = {
+	NULL,			/* CY_IC_GRPNUM_RESERVED */
+	"cy,cmd_regs",		/* CY_IC_GRPNUM_CMD_REGS */
+	"cy,tch_rep",		/* CY_IC_GRPNUM_TCH_REP */
+	"cy,data_rec",		/* CY_IC_GRPNUM_DATA_REC */
+	"cy,test_rec",		/* CY_IC_GRPNUM_TEST_REC */
+	"cy,pcfg_rec",		/* CY_IC_GRPNUM_PCFG_REC */
+	"cy,tch_parm_val",	/* CY_IC_GRPNUM_TCH_PARM_VAL */
+	"cy,tch_parm_size",	/* CY_IC_GRPNUM_TCH_PARM_SIZE */
+	NULL,			/* CY_IC_GRPNUM_RESERVED1 */
+	NULL,			/* CY_IC_GRPNUM_RESERVED2 */
+	"cy,opcfg_rec",		/* CY_IC_GRPNUM_OPCFG_REC */
+	"cy,ddata_rec",		/* CY_IC_GRPNUM_DDATA_REC */
+	"cy,mdata_rec",		/* CY_IC_GRPNUM_MDATA_REC */
+	"cy,test_regs",		/* CY_IC_GRPNUM_TEST_REGS */
+	"cy,btn_keys",		/* CY_IC_GRPNUM_BTN_KEYS */
+	NULL,			/* CY_IC_GRPNUM_TTHE_REGS */
+};
+
+static struct cyttsp5_core_platform_data *create_and_get_core_pdata(
+		struct device_node *core_node)
+{
+	struct cyttsp5_core_platform_data *pdata;
+	u32 value;
+	int rc;
+	int i;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	/* Required fields */
+	rc = of_property_read_u32(core_node, "cy,irq_gpio", &value);
+	if (rc)
+		goto fail_free;
+	pdata->irq_gpio = value;
+
+	rc = of_property_read_u32(core_node, "cy,hid_desc_register", &value);
+	if (rc)
+		goto fail_free;
+	pdata->hid_desc_register = value;
+
+	/* Optional fields */
+	/* rst_gpio is optional since a platform may use
+	 * power cycling instead of using the XRES pin
+	 */
+	rc = of_property_read_u32(core_node, "cy,rst_gpio", &value);
+	if (!rc)
+		pdata->rst_gpio = value;
+
+	rc = of_property_read_u32(core_node, "cy,level_irq_udelay", &value);
+	if (!rc)
+		pdata->level_irq_udelay = value;
+
+	rc = of_property_read_u32(core_node, "cy,vendor_id", &value);
+	if (!rc)
+		pdata->vendor_id = value;
+
+	rc = of_property_read_u32(core_node, "cy,product_id", &value);
+	if (!rc)
+		pdata->product_id = value;
+
+	rc = of_property_read_u32(core_node, "cy,flags", &value);
+	if (!rc)
+		pdata->flags = value;
+
+	rc = of_property_read_u32(core_node, "cy,easy_wakeup_gesture", &value);
+	if (!rc)
+		pdata->easy_wakeup_gesture = (u8)value;
+
+	for (i = 0; (unsigned int)i < ARRAY_SIZE(touch_setting_names); i++) {
+		if (touch_setting_names[i] == NULL)
+			continue;
+
+		pdata->sett[i] = create_and_get_touch_setting(core_node,
+				touch_setting_names[i]);
+		if (IS_ERR(pdata->sett[i])) {
+			rc = PTR_ERR(pdata->sett[i]);
+			goto fail_free_sett;
+		} else if (pdata->sett[i] == NULL)
+			pr_debug("%s: No data for setting '%s'\n", __func__,
+				touch_setting_names[i]);
+	}
+
+	pr_debug("%s: irq_gpio:%d rst_gpio:%d\n",
+		__func__, pdata->irq_gpio, pdata->rst_gpio);
+
+	pr_debug("hid_desc_register:%d irq_udelay:%d vid:%d pid:%d\n",
+		pdata->hid_desc_register,
+		pdata->level_irq_udelay, pdata->vendor_id, pdata->product_id);
+
+	pr_debug("flags:%d easy_wakeup_gesture:%d\n",
+		pdata->flags, pdata->easy_wakeup_gesture);
+
+	pdata->xres = cyttsp5_xres;
+	pdata->init = cyttsp5_init;
+	pdata->power = cyttsp5_power;
+	pdata->detect = cyttsp5_detect;
+	pdata->irq_stat = cyttsp5_irq_stat;
+
+	return pdata;
+
+fail_free_sett:
+	for (i--; i >= 0; i--)
+		free_touch_setting(pdata->sett[i]);
+fail_free:
+	kfree(pdata);
+fail:
+	return ERR_PTR(rc);
+}
+
+static void free_core_pdata(void *pdata)
+{
+	struct cyttsp5_core_platform_data *core_pdata = pdata;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(touch_setting_names); i++)
+		free_touch_setting(core_pdata->sett[i]);
+	kfree(core_pdata);
+}
+
+int cyttsp5_devtree_create_and_get_pdata(struct device *adap_dev)
+{
+	struct cyttsp5_platform_data *pdata;
+	struct device_node *core_node, *dev_node, *dev_node_fail;
+	enum cyttsp5_device_type type;
+	int count = 0;
+	int rc = 0;
+
+	if (!adap_dev->of_node)
+		return 0;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	adap_dev->platform_data = pdata;
+	set_pdata_ptr(pdata);
+
+	/* There should be only one core node */
+	for_each_child_of_node(adap_dev->of_node, core_node) {
+		const char *name;
+
+		rc = of_property_read_string(core_node, "name", &name);
+		if (!rc)
+			pr_debug("%s: name:%s\n", __func__, name);
+
+		pdata->core_pdata = create_and_get_core_pdata(core_node);
+		if (IS_ERR(pdata->core_pdata)) {
+			rc = PTR_ERR(pdata->core_pdata);
+			break;
+		}
+
+		/* Increment reference count */
+		of_node_get(core_node);
+
+		for_each_child_of_node(core_node, dev_node) {
+			count++;
+			rc = get_device_type(dev_node, &type);
+			if (rc)
+				break;
+			*pdata_ptr[type].pdata
+				= create_and_get_device_pdata(dev_node, type);
+			if (IS_ERR(*pdata_ptr[type].pdata))
+				rc = PTR_ERR(*pdata_ptr[type].pdata);
+			if (rc)
+				break;
+			/* Increment reference count */
+			of_node_get(dev_node);
+		}
+
+		if (rc) {
+			free_core_pdata(pdata->core_pdata);
+			of_node_put(core_node);
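+			/* Unwind only the children created before the
+			 * failing node, preserving the error code in rc
+			 */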
+			for_each_child_of_node(core_node, dev_node_fail) {
+				if (dev_node == dev_node_fail)
+					break;
+				if (get_device_type(dev_node_fail, &type))
+					break;
+				free_device_pdata(type);
+				of_node_put(dev_node_fail);
+			}
+			break;
+		}
+		pdata->loader_pdata = &_cyttsp5_loader_platform_data;
+	}
+
+	pr_debug("%s: %d child node(s) found\n", __func__, count);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(cyttsp5_devtree_create_and_get_pdata);
+
+int cyttsp5_devtree_clean_pdata(struct device *adap_dev)
+{
+	struct cyttsp5_platform_data *pdata;
+	struct device_node *core_node, *dev_node;
+	enum cyttsp5_device_type type;
+	int rc = 0;
+
+	if (!adap_dev->of_node)
+		return 0;
+
+	pdata = dev_get_platdata(adap_dev);
+	set_pdata_ptr(pdata);
+	for_each_child_of_node(adap_dev->of_node, core_node) {
+		free_core_pdata(pdata->core_pdata);
+		of_node_put(core_node);
+		for_each_child_of_node(core_node, dev_node) {
+			rc = get_device_type(dev_node, &type);
+			if (rc)
+				break;
+			free_device_pdata(type);
+			of_node_put(dev_node);
+		}
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(cyttsp5_devtree_clean_pdata);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parade TrueTouch(R) Standard Product DeviceTree Driver");
+MODULE_AUTHOR("Parade Technologies <ttdrivers@paradetech.com>");
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_i2c.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_i2c.c
new file mode 100644
index 0000000..93074116
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_i2c.c
@@ -0,0 +1,205 @@
+/*
+ * cyttsp5_i2c.c
+ * Parade TrueTouch(TM) Standard Product V5 I2C Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/version.h>
+#include "cyttsp5_regs.h"
+
+#define CY_I2C_DATA_SIZE  (2 * 256)
+
+static int cyttsp5_i2c_read_default(struct device *dev, void *buf, int size)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	int rc;
+
+	if (!buf || !size || size > CY_I2C_DATA_SIZE)
+		return -EINVAL;
+
+	rc = i2c_master_recv(client, buf, size);
+
+	return (rc < 0) ? rc : rc != size ? -EIO : 0;
+}
+
+static int cyttsp5_i2c_read_default_nosize(struct device *dev, u8 *buf, u32 max)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_msg msgs[2];
+	u8 msg_count = 1;
+	int rc;
+	u32 size;
+
+	if (!buf)
+		return -EINVAL;
+
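+	/* Read the 2-byte length field that prefixes each report first,
+	 * then fetch the whole report once its size is known.
+	 */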
+	msgs[0].addr = client->addr;
+	msgs[0].flags = (client->flags & I2C_M_TEN) | I2C_M_RD;
+	msgs[0].len = 2;
+	msgs[0].buf = buf;
+	rc = i2c_transfer(client->adapter, msgs, msg_count);
+	if (rc < 0 || rc != msg_count)
+		return (rc < 0) ? rc : -EIO;
+
+	size = get_unaligned_le16(&buf[0]);
+	/*
+	 * Before PIP 1.7, an empty buffer is reported as 0x0002;
+	 * from PIP 1.7, an empty buffer is reported as 0xFFXX.
+	 */
+	if (!size || size == 2 || size >= CY_PIP_1P7_EMPTY_BUF)
+		return 0;
+
+	if (size > max)
+		return -EINVAL;
+
+	rc = i2c_master_recv(client, buf, size);
+
+	return (rc < 0) ? rc : rc != (int)size ? -EIO : 0;
+}
+
+static int cyttsp5_i2c_write_read_specific(struct device *dev, u8 write_len,
+		u8 *write_buf, u8 *read_buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_msg msgs[2];
+	u8 msg_count = 1;
+	int rc;
+
+	if (!write_buf || !write_len)
+		return -EINVAL;
+
+	msgs[0].addr = client->addr;
+	msgs[0].flags = client->flags & I2C_M_TEN;
+	msgs[0].len = write_len;
+	msgs[0].buf = write_buf;
+	rc = i2c_transfer(client->adapter, msgs, msg_count);
+
+	if (rc < 0 || rc != msg_count)
+		return (rc < 0) ? rc : -EIO;
+
+	rc = 0;
+
+	if (read_buf)
+		rc = cyttsp5_i2c_read_default_nosize(dev, read_buf,
+				CY_I2C_DATA_SIZE);
+
+	return rc;
+}
+
+static struct cyttsp5_bus_ops cyttsp5_i2c_bus_ops = {
+	.bustype = BUS_I2C,
+	.read_default = cyttsp5_i2c_read_default,
+	.read_default_nosize = cyttsp5_i2c_read_default_nosize,
+	.write_read_specific = cyttsp5_i2c_write_read_specific,
+};
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+static const struct of_device_id cyttsp5_i2c_of_match[] = {
+	{ .compatible = "cy,cyttsp5_i2c_adapter", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, cyttsp5_i2c_of_match);
+#endif
+
+static int cyttsp5_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *i2c_id)
+{
+	struct device *dev = &client->dev;
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	const struct of_device_id *match;
+#endif
+	int rc;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(dev, "I2C functionality not Supported\n");
+		return -EIO;
+	}
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	match = of_match_device(of_match_ptr(cyttsp5_i2c_of_match), dev);
+	if (match) {
+		rc = cyttsp5_devtree_create_and_get_pdata(dev);
+		if (rc < 0)
+			return rc;
+	}
+#endif
+
+	rc = cyttsp5_probe(&cyttsp5_i2c_bus_ops, &client->dev, client->irq,
+			CY_I2C_DATA_SIZE);
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	if (rc && match)
+		cyttsp5_devtree_clean_pdata(dev);
+#endif
+
+	dev_info(dev, "cyttsp5_core_probe finish\n");
+	return rc;
+}
+
+static int cyttsp5_i2c_remove(struct i2c_client *client)
+{
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	struct device *dev = &client->dev;
+	const struct of_device_id *match;
+#endif
+	struct cyttsp5_core_data *cd = i2c_get_clientdata(client);
+
+	cyttsp5_release(cd);
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	match = of_match_device(of_match_ptr(cyttsp5_i2c_of_match), dev);
+	if (match)
+		cyttsp5_devtree_clean_pdata(dev);
+#endif
+
+	return 0;
+}
+
+static const struct i2c_device_id cyttsp5_i2c_id[] = {
+	{ CYTTSP5_I2C_NAME, 0, },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, cyttsp5_i2c_id);
+
+static struct i2c_driver cyttsp5_i2c_driver = {
+	.driver = {
+		.name = CYTTSP5_I2C_NAME,
+		.owner = THIS_MODULE,
+		.pm = &cyttsp5_pm_ops,
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+		.of_match_table = cyttsp5_i2c_of_match,
+#endif
+	},
+	.probe = cyttsp5_i2c_probe,
+	.remove = cyttsp5_i2c_remove,
+	.id_table = cyttsp5_i2c_id,
+};
+
+module_i2c_driver(cyttsp5_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parade TrueTouch(R) Standard Product I2C driver");
+MODULE_AUTHOR("Parade Technologies <ttdrivers@paradetech.com>");
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_loader.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_loader.c
new file mode 100644
index 0000000..b548489
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_loader.c
@@ -0,0 +1,1601 @@
+/*
+ * cyttsp5_loader.c
+ * Parade TrueTouch(TM) Standard Product V5 FW Loader Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include <linux/firmware.h>
+#include "cyttsp5_regs.h"
+
+#define CYTTSP5_LOADER_NAME "cyttsp5_loader"
+#define CY_FW_MANUAL_UPGRADE_FILE_NAME "cyttsp5_fw_manual_upgrade"
+
+/* Enable UPGRADE_FW_AND_CONFIG_IN_PROBE definition
+ * to perform FW and config upgrade during probe
+ * instead of scheduling a work for it
+ */
+/* #define UPGRADE_FW_AND_CONFIG_IN_PROBE */
+
+#define CYTTSP5_AUTO_LOAD_FOR_CORRUPTED_FW 1
+#define CYTTSP5_LOADER_FW_UPGRADE_RETRY_COUNT 3
+
+#define CYTTSP5_FW_UPGRADE \
+	(defined(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE) \
+	|| defined(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE))
+
+#define CYTTSP5_TTCONFIG_UPGRADE \
+	(defined(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_TTCONFIG_UPGRADE) \
+	|| defined(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MANUAL_TTCONFIG_UPGRADE))
+
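+/* 8-byte key sent with bootloader initiate and config block write commands */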
+static const u8 cyttsp5_security_key[] = {
+	0xA5, 0x01, 0x02, 0x03, 0xFF, 0xFE, 0xFD, 0x5A
+};
+
+/* Timeout values in ms. */
+#define CY_LDR_REQUEST_EXCLUSIVE_TIMEOUT  500
+#define CY_LDR_SWITCH_TO_APP_MODE_TIMEOUT 300
+
+#define CY_MAX_STATUS_SIZE                32
+
+#define CY_DATA_MAX_ROW_SIZE              256
+#define CY_DATA_ROW_SIZE                  128
+
+#define CY_ARRAY_ID_OFFSET                0
+#define CY_ROW_NUM_OFFSET                 1
+#define CY_ROW_SIZE_OFFSET                3
+#define CY_ROW_DATA_OFFSET                5
+
+#define CY_POST_TT_CFG_CRC_MASK           0x2
+
+struct cyttsp5_loader_data {
+	struct device *dev;
+	struct cyttsp5_sysinfo *si;
+	u8 status_buf[CY_MAX_STATUS_SIZE];
+	struct completion int_running;
+	struct completion calibration_complete;
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE
+	struct completion builtin_bin_fw_complete;
+	int builtin_bin_fw_status;
+	bool is_manual_upgrade_enabled;
+#endif
+	struct work_struct fw_and_config_upgrade;
+	struct work_struct calibration_work;
+	struct cyttsp5_loader_platform_data *loader_pdata;
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MANUAL_TTCONFIG_UPGRADE
+	struct mutex config_lock;
+	u8 *config_data;
+	int config_size;
+	bool config_loading;
+#endif
+};
+
+struct cyttsp5_dev_id {
+	u32 silicon_id;
+	u8 rev_id;
+	u32 bl_ver;
+};
+
+struct cyttsp5_hex_image {
+	u8 array_id;
+	u16 row_num;
+	u16 row_size;
+	u8 row_data[CY_DATA_ROW_SIZE];
+} __packed;
+
+static struct cyttsp5_core_commands *cmd;
+
+static struct cyttsp5_module loader_module;
+
+static inline struct cyttsp5_loader_data *cyttsp5_get_loader_data(
+		struct device *dev)
+{
+	return cyttsp5_get_module_data(dev, &loader_module);
+}
+
+#if CYTTSP5_FW_UPGRADE \
+	|| defined(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_TTCONFIG_UPGRADE)
+static u8 cyttsp5_get_panel_id(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	return cd->panel_id;
+}
+#endif
+
+#if CYTTSP5_FW_UPGRADE || CYTTSP5_TTCONFIG_UPGRADE
+/*
+ * return code:
+ * -1: Do not upgrade firmware
+ *  0: Version info same, let caller decide
+ *  1: Do a firmware upgrade
+ */
+static int cyttsp5_check_firmware_version(struct device *dev,
+		u32 fw_ver_new, u32 fw_revctrl_new)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	u32 fw_ver_img;
+	u32 fw_revctrl_img;
+
+	fw_ver_img = ld->si->cydata.fw_ver_major << 8;
+	fw_ver_img += ld->si->cydata.fw_ver_minor;
+
+	parade_debug(dev, DEBUG_LEVEL_1,
+		"%s: img vers:0x%04X new vers:0x%04X\n", __func__,
+		fw_ver_img, fw_ver_new);
+
+	if (fw_ver_new > fw_ver_img) {
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: Image is newer, will upgrade\n", __func__);
+		return 1;
+	}
+
+	if (fw_ver_new < fw_ver_img) {
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: Image is older, will NOT upgrade\n", __func__);
+		return -EPERM;
+	}
+
+	fw_revctrl_img = ld->si->cydata.revctrl;
+
+	parade_debug(dev, DEBUG_LEVEL_1,
+		"%s: img revctrl:0x%04X new revctrl:0x%04X\n",
+		__func__, fw_revctrl_img, fw_revctrl_new);
+
+	if (fw_revctrl_new > fw_revctrl_img) {
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: Image is newer, will upgrade\n", __func__);
+		return 1;
+	}
+
+	if (fw_revctrl_new < fw_revctrl_img) {
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: Image is older, will NOT upgrade\n", __func__);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static void cyttsp5_calibrate_idacs(struct work_struct *calibration_work)
+{
+	struct cyttsp5_loader_data *ld = container_of(calibration_work,
+			struct cyttsp5_loader_data, calibration_work);
+	struct device *dev = ld->dev;
+	u8 mode;
+	u8 status;
+	int rc;
+
+	rc = cmd->request_exclusive(dev, CY_LDR_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0)
+		goto exit;
+
+	rc = cmd->nonhid_cmd->suspend_scanning(dev, 0);
+	if (rc < 0)
+		goto release;
+
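+	/* Calibrate each of the three IDAC sensing modes in turn */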
+	for (mode = 0; mode < 3; mode++) {
+		rc = cmd->nonhid_cmd->calibrate_idacs(dev, 0, mode, &status);
+		if (rc < 0)
+			goto release;
+	}
+
+	rc = cmd->nonhid_cmd->resume_scanning(dev, 0);
+	if (rc < 0)
+		goto release;
+
+	parade_debug(dev, DEBUG_LEVEL_1, "%s: Calibration Done\n", __func__);
+
+release:
+	cmd->release_exclusive(dev);
+exit:
+	complete(&ld->calibration_complete);
+}
+
+static int cyttsp5_calibration_attention(struct device *dev)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	int rc = 0;
+
+	schedule_work(&ld->calibration_work);
+
+	cmd->unsubscribe_attention(dev, CY_ATTEN_STARTUP, CYTTSP5_LOADER_NAME,
+		cyttsp5_calibration_attention, 0);
+
+	return rc;
+}
+
+#endif /* CYTTSP5_FW_UPGRADE || CYTTSP5_TTCONFIG_UPGRADE */
+
+#if CYTTSP5_FW_UPGRADE
+static u8 *cyttsp5_get_row_(struct device *dev, u8 *row_buf,
+		u8 *image_buf, int size)
+{
+	memcpy(row_buf, image_buf, size);
+	return image_buf + size;
+}
+
+static int cyttsp5_ldr_enter_(struct device *dev, struct cyttsp5_dev_id *dev_id)
+{
+	int rc;
+	u8 return_data[8];
+	u8 mode;
+
+	dev_id->silicon_id = 0;
+	dev_id->rev_id = 0;
+	dev_id->bl_ver = 0;
+
+	cmd->request_reset(dev);
+
+	rc = cmd->request_get_mode(dev, 0, &mode);
+	if (rc < 0)
+		return rc;
+
+	if (mode == CY_MODE_UNKNOWN)
+		return -EINVAL;
+
+	if (mode == CY_MODE_OPERATIONAL) {
+		rc = cmd->nonhid_cmd->start_bl(dev, 0);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = cmd->nonhid_cmd->get_bl_info(dev, 0, return_data);
+	if (rc < 0)
+		return rc;
+
+	dev_id->silicon_id = get_unaligned_le32(&return_data[0]);
+	dev_id->rev_id = return_data[4];
+	dev_id->bl_ver = return_data[5] + (return_data[6] << 8)
+		+ (return_data[7] << 16);
+
+	return 0;
+}
+
+static int cyttsp5_ldr_init_(struct device *dev,
+		struct cyttsp5_hex_image *row_image)
+{
+	return cmd->nonhid_cmd->initiate_bl(dev, 0, 8,
+			(u8 *)cyttsp5_security_key, row_image->row_size,
+			row_image->row_data);
+}
+
+static int cyttsp5_ldr_parse_row_(struct device *dev, u8 *row_buf,
+	struct cyttsp5_hex_image *row_image)
+{
+	int rc = 0;
+
+	row_image->array_id = row_buf[CY_ARRAY_ID_OFFSET];
+	row_image->row_num = get_unaligned_be16(&row_buf[CY_ROW_NUM_OFFSET]);
+	row_image->row_size = get_unaligned_be16(&row_buf[CY_ROW_SIZE_OFFSET]);
+
+	if (row_image->row_size > ARRAY_SIZE(row_image->row_data)) {
+		dev_err(dev, "%s: row data buffer overflow\n", __func__);
+		rc = -EOVERFLOW;
+		goto cyttsp5_ldr_parse_row_exit;
+	}
+
+	memcpy(row_image->row_data, &row_buf[CY_ROW_DATA_OFFSET],
+		row_image->row_size);
+cyttsp5_ldr_parse_row_exit:
+	return rc;
+}
+
+static int cyttsp5_ldr_prog_row_(struct device *dev,
+		struct cyttsp5_hex_image *row_image)
+{
+	u16 length = row_image->row_size + 3;
+	u8 data[3 + row_image->row_size];
+	u8 offset = 0;
+
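+	/* Payload: array id, 16-bit row number (little-endian), row data */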
+	data[offset++] = row_image->array_id;
+	data[offset++] = LOW_BYTE(row_image->row_num);
+	data[offset++] = HI_BYTE(row_image->row_num);
+	memcpy(data + 3, row_image->row_data, row_image->row_size);
+	return cmd->nonhid_cmd->prog_and_verify(dev, 0, length, data);
+}
+
+static int cyttsp5_ldr_verify_chksum_(struct device *dev)
+{
+	u8 result;
+	int rc;
+
+	rc = cmd->nonhid_cmd->verify_app_integrity(dev, 0, &result);
+	if (rc)
+		return rc;
+
+	/* fail */
+	if (result == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cyttsp5_ldr_exit_(struct device *dev)
+{
+	return cmd->nonhid_cmd->launch_app(dev, 0);
+}
+
+static int cyttsp5_load_app_(struct device *dev, const u8 *fw, int fw_size)
+{
+	struct cyttsp5_dev_id *dev_id;
+	struct cyttsp5_hex_image *row_image;
+	u8 *row_buf;
+	size_t image_rec_size;
+	size_t row_buf_size = CY_DATA_MAX_ROW_SIZE;
+	int row_count = 0;
+	u8 *p;
+	u8 *last_row;
+	int rc;
+	int rc_tmp;
+
+	image_rec_size = sizeof(struct cyttsp5_hex_image);
+	if (fw_size % image_rec_size != 0) {
+		dev_err(dev, "%s: Firmware image is misaligned\n", __func__);
+		rc = -EINVAL;
+		goto _cyttsp5_load_app_error;
+	}
+
+	dev_info(dev, "%s: start load app\n", __func__);
+#ifdef TTHE_TUNER_SUPPORT
+	cmd->request_tthe_print(dev, NULL, 0, "start load app");
+#endif
+
+	row_buf = kzalloc(row_buf_size, GFP_KERNEL);
+	row_image = kzalloc(sizeof(struct cyttsp5_hex_image), GFP_KERNEL);
+	dev_id = kzalloc(sizeof(struct cyttsp5_dev_id), GFP_KERNEL);
+	if (!row_buf || !row_image || !dev_id) {
+		rc = -ENOMEM;
+		goto _cyttsp5_load_app_exit;
+	}
+
+	cmd->request_stop_wd(dev);
+
+	dev_info(dev, "%s: Send BL Loader Enter\n", __func__);
+#ifdef TTHE_TUNER_SUPPORT
+	cmd->request_tthe_print(dev, NULL, 0, "Send BL Loader Enter");
+#endif
+	rc = cyttsp5_ldr_enter_(dev, dev_id);
+	if (rc) {
+		dev_err(dev, "%s: Error cannot start Loader (ret=%d)\n",
+			__func__, rc);
+		goto _cyttsp5_load_app_exit;
+	}
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: dev: silicon id=%08X rev=%02X bl=%08X\n",
+		__func__, dev_id->silicon_id, dev_id->rev_id, dev_id->bl_ver);
+
+	/* Get the last row first: it holds the data used to initiate the
+	 * bootloader before the remaining rows are programmed
+	 */
+	last_row = (u8 *)fw + fw_size - image_rec_size;
+	cyttsp5_get_row_(dev, row_buf, last_row, image_rec_size);
+	cyttsp5_ldr_parse_row_(dev, row_buf, row_image);
+
+	/* initialise bootloader */
+	rc = cyttsp5_ldr_init_(dev, row_image);
+	if (rc) {
+		dev_err(dev, "%s: Error cannot init Loader (ret=%d)\n",
+			__func__, rc);
+		goto _cyttsp5_load_app_exit;
+	}
+
+	dev_info(dev, "%s: Send BL Loader Blocks\n", __func__);
+#ifdef TTHE_TUNER_SUPPORT
+	cmd->request_tthe_print(dev, NULL, 0, "Send BL Loader Blocks");
+#endif
+	p = (u8 *)fw;
+	while (p < last_row) {
+		/* Get row */
+		parade_debug(dev, DEBUG_LEVEL_1, "%s: read row=%d\n",
+			__func__, ++row_count);
+		memset(row_buf, 0, row_buf_size);
+		p = cyttsp5_get_row_(dev, row_buf, p, image_rec_size);
+
+		/* Parse row */
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: p=%p buf=%pK buf[0]=%02X\n",
+			__func__, p, row_buf, row_buf[0]);
+		rc = cyttsp5_ldr_parse_row_(dev, row_buf, row_image);
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: array_id=%02X row_num=%04X row_size=%04X\n",
+			__func__, row_image->array_id,
+			row_image->row_num, row_image->row_size);
+		if (rc) {
+			dev_err(dev, "%s: Parse Row Error (a=%d r=%d ret=%d\n",
+				__func__, row_image->array_id,
+				row_image->row_num, rc);
+			goto _cyttsp5_load_app_exit;
+		} else
+			parade_debug(dev, DEBUG_LEVEL_2,
+				"%s: Parse Row (a=%d r=%d ret=%d\n",
+				__func__, row_image->array_id,
+				row_image->row_num, rc);
+
+		/* program row */
+		rc = cyttsp5_ldr_prog_row_(dev, row_image);
+		if (rc) {
+			dev_err(dev,
+				"%s: Program Row Err(array=%d row=%d ret=%d)\n",
+				__func__, row_image->array_id,
+				row_image->row_num, rc);
+			goto _cyttsp5_load_app_exit;
+		}
+
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: array=%d row_cnt=%d row_num=%04X\n",
+			__func__, row_image->array_id, row_count,
+			row_image->row_num);
+	}
+
+	/* exit loader */
+	dev_info(dev, "%s: Send BL Loader Terminate\n", __func__);
+#ifdef TTHE_TUNER_SUPPORT
+	cmd->request_tthe_print(dev, NULL, 0, "Send BL Loader Terminate");
+#endif
+	rc = cyttsp5_ldr_exit_(dev);
+	if (rc) {
+		dev_err(dev, "%s: Error on exit Loader (ret=%d)\n",
+			__func__, rc);
+
+		/* verify app checksum */
+		rc_tmp = cyttsp5_ldr_verify_chksum_(dev);
+		if (rc_tmp)
+			dev_err(dev, "%s: ldr_verify_chksum fail r=%d\n",
+				__func__, rc_tmp);
+		else
+			dev_info(dev, "%s: APP Checksum Verified\n", __func__);
+	}
+
+_cyttsp5_load_app_exit:
+	kfree(row_buf);
+	kfree(row_image);
+	kfree(dev_id);
+_cyttsp5_load_app_error:
+	return rc;
+}
+
+static int cyttsp5_upgrade_firmware(struct device *dev, const u8 *fw_img,
+		int fw_size)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	int retry = CYTTSP5_LOADER_FW_UPGRADE_RETRY_COUNT;
+	bool wait_for_calibration_complete = false;
+	int rc;
+
+	pm_runtime_get_sync(dev);
+
+	rc = cmd->request_exclusive(dev, CY_LDR_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0)
+		goto exit;
+
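+	/* Retry the load a few times; each attempt re-enters the bootloader
+	 * from reset, so a transient failure is recoverable.
+	 */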
+	while (retry--) {
+		rc = cyttsp5_load_app_(dev, fw_img, fw_size);
+		if (rc < 0)
+			dev_err(dev,
+				"%s: Firmware update failed rc=%d, retry:%d\n",
+				__func__, rc, retry);
+		else
+			break;
+		msleep(20);
+	}
+	if (rc < 0)
+		dev_err(dev, "%s: Firmware update failed with error code %d\n",
+			__func__, rc);
+	else if (ld->loader_pdata &&
+			(ld->loader_pdata->flags
+			 & CY_LOADER_FLAG_CALIBRATE_AFTER_FW_UPGRADE)) {
+		reinit_completion(&ld->calibration_complete);
+
+		/* set up call back for startup */
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: Adding callback for calibration\n", __func__);
+		rc = cmd->subscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_LOADER_NAME, cyttsp5_calibration_attention, 0);
+		if (rc) {
+			dev_err(dev,
+				"%s: Failed adding callback for calibration\n",
+				__func__);
+			dev_err(dev, "%s: No calibration will be performed\n",
+				__func__);
+			rc = 0;
+		} else
+			wait_for_calibration_complete = true;
+	}
+
+	cmd->release_exclusive(dev);
+
+exit:
+	if (!rc)
+		cmd->request_restart(dev, true);
+
+	pm_runtime_put_sync(dev);
+
+	if (wait_for_calibration_complete)
+		wait_for_completion(&ld->calibration_complete);
+
+	return rc;
+}
+
+static int cyttsp5_loader_attention(struct device *dev)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+
+	complete(&ld->int_running);
+	return 0;
+}
+#endif /* CYTTSP5_FW_UPGRADE */
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE
+static int cyttsp5_check_firmware_version_platform(struct device *dev,
+		struct cyttsp5_touch_firmware *fw)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	u32 fw_ver_new;
+	u32 fw_revctrl_new;
+	int upgrade;
+
+	if (!ld->si) {
+		dev_info(dev,
+			"%s: No firmware information or device FW corrupted\n",
+			__func__);
+		return CYTTSP5_AUTO_LOAD_FOR_CORRUPTED_FW;
+	}
+
+	fw_ver_new = get_unaligned_be16(fw->ver + 2);
+	/* 4 middle bytes are not used */
+	fw_revctrl_new = get_unaligned_be32(fw->ver + 8);
+
+	upgrade = cyttsp5_check_firmware_version(dev, fw_ver_new,
+		fw_revctrl_new);
+
+	if (upgrade > 0)
+		return 1;
+
+	return 0;
+}
+
+static struct cyttsp5_touch_firmware *cyttsp5_get_platform_firmware(
+		struct device *dev)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	struct cyttsp5_touch_firmware **fws;
+	struct cyttsp5_touch_firmware *fw;
+	u8 panel_id;
+
+	panel_id = cyttsp5_get_panel_id(dev);
+	if (panel_id == PANEL_ID_NOT_ENABLED) {
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: Panel ID not enabled, using legacy firmware\n",
+			__func__);
+		return ld->loader_pdata->fw;
+	}
+
+	fws = ld->loader_pdata->fws;
+	if (!fws) {
+		dev_err(dev, "%s: No firmwares provided\n", __func__);
+		return NULL;
+	}
+
+	/* Find FW according to the Panel ID */
+	while ((fw = *fws++)) {
+		if (fw->panel_id == panel_id) {
+			parade_debug(dev, DEBUG_LEVEL_1,
+				"%s: Found matching fw:%pK(Panel ID: 0x%02X)\n",
+				__func__, fw, fw->panel_id);
+			return fw;
+		}
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: Found mismatching fw:%pK(Panel ID: 0x%02X)\n",
+			__func__, fw, fw->panel_id);
+	}
+
+	return NULL;
+}
+
+static int upgrade_firmware_from_platform(struct device *dev,
+		bool forced)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	struct cyttsp5_touch_firmware *fw;
+	int rc = -ENODEV;
+	int upgrade;
+
+	if (!ld->loader_pdata) {
+		dev_err(dev, "%s: No loader platform data\n", __func__);
+		return rc;
+	}
+
+	fw = cyttsp5_get_platform_firmware(dev);
+	if (!fw || !fw->img || !fw->size) {
+		dev_err(dev, "%s: No platform firmware\n", __func__);
+		return rc;
+	}
+
+	if (!fw->ver || !fw->vsize) {
+		dev_err(dev, "%s: No platform firmware version\n",
+			__func__);
+		return rc;
+	}
+
+	if (forced)
+		upgrade = forced;
+	else
+		upgrade = cyttsp5_check_firmware_version_platform(dev, fw);
+
+	if (upgrade)
+		return cyttsp5_upgrade_firmware(dev, fw->img, fw->size);
+
+	return rc;
+}
+#endif /* CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE */
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE
+static void _cyttsp5_firmware_cont(const struct firmware *fw, void *context)
+{
+	struct device *dev = context;
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	u8 header_size = 0;
+
+	if (!fw)
+		goto cyttsp5_firmware_cont_exit;
+
+	if (!fw->data || !fw->size) {
+		dev_err(dev, "%s: No firmware received\n", __func__);
+		goto cyttsp5_firmware_cont_release_exit;
+	}
+
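+	/* The first byte of the image gives the header length; the firmware
+	 * payload starts right after the header.
+	 */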
+	header_size = fw->data[0];
+	if (header_size + 1 >= fw->size) {
+		dev_err(dev, "%s: Firmware format is invalid\n", __func__);
+		goto cyttsp5_firmware_cont_release_exit;
+	}
+
+	cyttsp5_upgrade_firmware(dev, &(fw->data[header_size + 1]),
+		fw->size - (header_size + 1));
+
+cyttsp5_firmware_cont_release_exit:
+	release_firmware(fw);
+
+cyttsp5_firmware_cont_exit:
+	ld->is_manual_upgrade_enabled = 0;
+}
+
+static int cyttsp5_check_firmware_version_builtin(struct device *dev,
+		const struct firmware *fw)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	u32 fw_ver_new;
+	u32 fw_revctrl_new;
+	int upgrade;
+
+	if (!ld->si) {
+		dev_info(dev,
+			"%s: No firmware information or device FW corrupted\n",
+			__func__);
+		return CYTTSP5_AUTO_LOAD_FOR_CORRUPTED_FW;
+	}
+
+	fw_ver_new = get_unaligned_be16(fw->data + 3);
+	/* 4 middle bytes are not used */
+	fw_revctrl_new = get_unaligned_be32(fw->data + 9);
+
+	upgrade = cyttsp5_check_firmware_version(dev, fw_ver_new,
+			fw_revctrl_new);
+
+	if (upgrade > 0)
+		return 1;
+
+	return 0;
+}
+
+static void _cyttsp5_firmware_cont_builtin(const struct firmware *fw,
+		void *context)
+{
+	struct device *dev = context;
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	int upgrade;
+
+	if (!fw) {
+		dev_info(dev, "%s: No builtin firmware\n", __func__);
+		goto _cyttsp5_firmware_cont_builtin_exit;
+	}
+
+	if (!fw->data || !fw->size) {
+		dev_err(dev, "%s: Invalid builtin firmware\n", __func__);
+		goto _cyttsp5_firmware_cont_builtin_exit;
+	}
+
+	parade_debug(dev, DEBUG_LEVEL_1, "%s: Found firmware\n", __func__);
+
+	upgrade = cyttsp5_check_firmware_version_builtin(dev, fw);
+	if (upgrade) {
+		_cyttsp5_firmware_cont(fw, dev);
+		ld->builtin_bin_fw_status = 0;
+		complete(&ld->builtin_bin_fw_complete);
+		return;
+	}
+
+_cyttsp5_firmware_cont_builtin_exit:
+	release_firmware(fw);
+
+	ld->builtin_bin_fw_status = -EINVAL;
+	complete(&ld->builtin_bin_fw_complete);
+}
+
+static int upgrade_firmware_from_class(struct device *dev)
+{
+	int retval;
+
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Enabling firmware class loader\n", __func__);
+
+	retval = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
+			CY_FW_MANUAL_UPGRADE_FILE_NAME, dev, GFP_KERNEL, dev,
+			_cyttsp5_firmware_cont);
+	if (retval < 0) {
+		dev_err(dev, "%s: Fail request firmware class file load\n",
+			__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Generates the binary FW filename as follows:
+ * - Panel ID not enabled: cyttsp5_fw.bin
+ * - Panel ID enabled: cyttsp5_fw_pidXX.bin
+ */
+static char *generate_firmware_filename(struct device *dev)
+{
+	char *filename;
+	u8 panel_id;
+
+#define FILENAME_LEN_MAX 64
+	filename = kzalloc(FILENAME_LEN_MAX, GFP_KERNEL);
+	if (!filename)
+		return NULL;
+
+	panel_id = cyttsp5_get_panel_id(dev);
+	if (panel_id == PANEL_ID_NOT_ENABLED)
+		snprintf(filename, FILENAME_LEN_MAX, "%s", CY_FW_FILE_NAME);
+	else
+		snprintf(filename, FILENAME_LEN_MAX, "%s_pid%02X%s",
+			CY_FW_FILE_PREFIX, panel_id, CY_FW_FILE_SUFFIX);
+
+	parade_debug(dev, DEBUG_LEVEL_1, "%s: Filename: %s\n",
+		__func__, filename);
+
+	return filename;
+}
+
+static int upgrade_firmware_from_builtin(struct device *dev)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	char *filename;
+	int retval;
+
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Enabling firmware class loader built-in\n",
+		__func__);
+
+	filename = generate_firmware_filename(dev);
+	if (!filename)
+		return -ENOMEM;
+
+	retval = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+			filename, dev, GFP_KERNEL, dev,
+			_cyttsp5_firmware_cont_builtin);
+	if (retval < 0) {
+		dev_err(dev, "%s: Fail request firmware class file load\n",
+			__func__);
+		goto exit;
+	}
+
+	/* wait until FW binary upgrade finishes */
+	wait_for_completion(&ld->builtin_bin_fw_complete);
+
+	retval = ld->builtin_bin_fw_status;
+
+exit:
+	kfree(filename);
+
+	return retval;
+}
+#endif /* CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE */
+
+#if CYTTSP5_TTCONFIG_UPGRADE
+static int cyttsp5_write_config_row_(struct device *dev, u8 ebid,
+		u16 row_number, u16 row_size, u8 *data)
+{
+	int rc;
+	u16 actual_write_len;
+
+	rc = cmd->nonhid_cmd->write_conf_block(dev, 0, row_number,
+			row_size, ebid, data, (u8 *)cyttsp5_security_key,
+			&actual_write_len);
+	if (rc) {
+		dev_err(dev, "%s: Fail Put EBID=%d row=%d cmd fail r=%d\n",
+			__func__, ebid, row_number, rc);
+		return rc;
+	}
+
+	if (actual_write_len != row_size) {
+		dev_err(dev,
+			"%s: Fail Put EBID=%d row=%d wrong write size=%d\n",
+			__func__, ebid, row_number, actual_write_len);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cyttsp5_upgrade_ttconfig(struct device *dev,
+		const u8 *ttconfig_data, int ttconfig_size)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	bool wait_for_calibration_complete = false;
+	u8 ebid = CY_TCH_PARM_EBID;
+	u16 row_size = CY_DATA_ROW_SIZE;
+	u16 table_size;
+	u16 row_count;
+	u16 residue;
+	u8 *row_buf;
+	u8 verify_crc_status;
+	u16 calculated_crc;
+	u16 stored_crc;
+	int rc = 0;
+	int i;
+
+	table_size = ttconfig_size;
+	row_count = table_size / row_size;
+	row_buf = (u8 *)ttconfig_data;
+	parade_debug(dev, DEBUG_LEVEL_1,
+		"%s: size:%d row_size=%d row_count=%d\n",
+		__func__, table_size, row_size, row_count);
+
+	pm_runtime_get_sync(dev);
+
+	rc = cmd->request_exclusive(dev, CY_LDR_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0)
+		goto exit;
+
+	rc = cmd->nonhid_cmd->suspend_scanning(dev, 0);
+	if (rc < 0)
+		goto release;
+
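+	/* Write the config row by row; any remainder shorter than a full
+	 * row is written separately below.
+	 */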
+	for (i = 0; i < row_count; i++) {
+		parade_debug(dev, DEBUG_LEVEL_1, "%s: row=%d size=%d\n",
+			__func__, i, row_size);
+		rc = cyttsp5_write_config_row_(dev, ebid, i, row_size,
+				row_buf);
+		if (rc) {
+			dev_err(dev, "%s: Fail put row=%d r=%d\n",
+				__func__, i, rc);
+			break;
+		}
+		row_buf += row_size;
+	}
+	if (!rc) {
+		residue = table_size % row_size;
+		if (residue) {
+			parade_debug(dev, DEBUG_LEVEL_1, "%s: row=%d size=%d\n",
+				__func__, i, residue);
+			rc = cyttsp5_write_config_row_(dev, ebid, i, residue,
+					row_buf);
+			row_count++;
+			if (rc)
+				dev_err(dev, "%s: Fail put row=%d r=%d\n",
+					__func__, i, rc);
+		}
+	}
+
+	if (!rc)
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: TT_CFG updated: rows:%d bytes:%d\n",
+			__func__, row_count, table_size);
+
+	rc = cmd->nonhid_cmd->verify_config_block_crc(dev, 0, ebid,
+			&verify_crc_status, &calculated_crc, &stored_crc);
+	if (rc || verify_crc_status)
+		dev_err(dev,
+			"%s: CRC Failed, ebid=%d, status=%d, scrc=%X ccrc=%X\n",
+			__func__, ebid, verify_crc_status,
+			calculated_crc, stored_crc);
+	else
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: CRC PASS, ebid=%d, status=%d, scrc=%X ccrc=%X\n",
+			__func__, ebid, verify_crc_status,
+			calculated_crc, stored_crc);
+
+	rc = cmd->nonhid_cmd->resume_scanning(dev, 0);
+	if (rc < 0)
+		goto release;
+
+	if (ld->loader_pdata &&
+		(ld->loader_pdata->flags
+			 & CY_LOADER_FLAG_CALIBRATE_AFTER_TTCONFIG_UPGRADE)) {
+		reinit_completion(&ld->calibration_complete);
+		/* set up call back for startup */
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: Adding callback for calibration\n",
+			__func__);
+		rc = cmd->subscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_LOADER_NAME, cyttsp5_calibration_attention, 0);
+		if (rc) {
+			dev_err(dev,
+				"%s: Failed adding callback for calibration\n",
+				__func__);
+			dev_err(dev,
+				"%s: No calibration will be performed\n",
+				__func__);
+			rc = 0;
+		} else
+			wait_for_calibration_complete = true;
+	}
+
+release:
+	cmd->release_exclusive(dev);
+
+exit:
+	if (!rc)
+		cmd->request_restart(dev, true);
+
+	pm_runtime_put_sync(dev);
+
+	if (wait_for_calibration_complete)
+		wait_for_completion(&ld->calibration_complete);
+
+	return rc;
+}
+#endif /* CYTTSP5_TTCONFIG_UPGRADE */
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_TTCONFIG_UPGRADE
+static int cyttsp5_get_ttconfig_crc(struct device *dev,
+		const u8 *ttconfig_data, int ttconfig_size, u16 *crc)
+{
+	u16 crc_loc;
+
+	crc_loc = get_unaligned_le16(&ttconfig_data[2]);
+	if (ttconfig_size < crc_loc + 2)
+		return -EINVAL;
+
+	*crc = get_unaligned_le16(&ttconfig_data[crc_loc]);
+
+	return 0;
+}
+
+static int cyttsp5_get_ttconfig_version(struct device *dev,
+		const u8 *ttconfig_data, int ttconfig_size, u16 *version)
+{
+	if (ttconfig_size < CY_TTCONFIG_VERSION_OFFSET
+			+ CY_TTCONFIG_VERSION_SIZE)
+		return -EINVAL;
+
+	*version = get_unaligned_le16(
+		&ttconfig_data[CY_TTCONFIG_VERSION_OFFSET]);
+
+	return 0;
+}
+
+static int cyttsp5_check_ttconfig_version(struct device *dev,
+		const u8 *ttconfig_data, int ttconfig_size)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	u16 cfg_crc_new;
+	int rc;
+
+	if (!ld->si)
+		return 0;
+
+	/* Check for config version */
+	if (ld->loader_pdata->flags &
+			CY_LOADER_FLAG_CHECK_TTCONFIG_VERSION) {
+		u16 cfg_ver_new;
+
+		rc = cyttsp5_get_ttconfig_version(dev, ttconfig_data,
+				ttconfig_size, &cfg_ver_new);
+		if (rc)
+			return 0;
+
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: img_ver:0x%04X new_ver:0x%04X\n",
+			__func__, ld->si->cydata.fw_ver_conf, cfg_ver_new);
+
+		/* Check if config version is newer */
+		if (cfg_ver_new > ld->si->cydata.fw_ver_conf) {
+			parade_debug(dev, DEBUG_LEVEL_1,
+				"%s: Config version newer, will upgrade\n",
+				__func__);
+			return 1;
+		}
+
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: Config ver identical or older, won't upgrade\n",
+			__func__);
+	/* Check for config CRC */
+	} else {
+		rc = cyttsp5_get_ttconfig_crc(dev, ttconfig_data,
+				ttconfig_size, &cfg_crc_new);
+		if (rc)
+			return 0;
+
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: img_crc:0x%04X new_crc:0x%04X\n",
+			__func__, ld->si->ttconfig.crc, cfg_crc_new);
+
+		if (cfg_crc_new != ld->si->ttconfig.crc) {
+			parade_debug(dev, DEBUG_LEVEL_1,
+				"%s: Config CRC different, will upgrade\n",
+				__func__);
+			return 1;
+		}
+
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: Config CRC equal, will NOT upgrade\n",
+			__func__);
+	}
+
+	return 0;
+}
+
+static int cyttsp5_check_ttconfig_version_platform(struct device *dev,
+		struct cyttsp5_touch_config *ttconfig)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	u32 fw_ver_config;
+	u32 fw_revctrl_config;
+
+	if (!ld->si) {
+		dev_info(dev,
+			"%s: No firmware information or device FW corrupted\n",
+			__func__);
+		return 0;
+	}
+
+	fw_ver_config = get_unaligned_be16(ttconfig->fw_ver + 2);
+	/* 4 middle bytes are not used */
+	fw_revctrl_config = get_unaligned_be32(ttconfig->fw_ver + 8);
+
+	/* FW versions should match */
+	if (cyttsp5_check_firmware_version(dev, fw_ver_config,
+			fw_revctrl_config)) {
+		dev_err(dev, "%s: FW versions mismatch\n", __func__);
+		return 0;
+	}
+
+	/* Check PowerOn Self Test, TT_CFG CRC bit */
+	if ((ld->si->cydata.post_code & CY_POST_TT_CFG_CRC_MASK) == 0) {
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: POST, TT_CFG failed (%X), will upgrade\n",
+			__func__, ld->si->cydata.post_code);
+		return 1;
+	}
+
+	return cyttsp5_check_ttconfig_version(dev, ttconfig->param_regs->data,
+			ttconfig->param_regs->size);
+}
+
+static struct cyttsp5_touch_config *cyttsp5_get_platform_ttconfig(
+		struct device *dev)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	struct cyttsp5_touch_config **ttconfigs;
+	struct cyttsp5_touch_config *ttconfig;
+	u8 panel_id;
+
+	panel_id = cyttsp5_get_panel_id(dev);
+	if (panel_id == PANEL_ID_NOT_ENABLED) {
+		/* TODO: Make debug message */
+		dev_info(dev,
+			"%s: Panel ID not enabled, using legacy ttconfig\n",
+			__func__);
+		return ld->loader_pdata->ttconfig;
+	}
+
+	ttconfigs = ld->loader_pdata->ttconfigs;
+	if (!ttconfigs)
+		return NULL;
+
+	/* Find TT config according to the Panel ID */
+	while ((ttconfig = *ttconfigs++)) {
+		if (ttconfig->panel_id == panel_id) {
+			/* TODO: Make debug message */
+			dev_info(dev,
+				"%s: matching ttconfig:%pK(Panel ID: 0x%02X)\n",
+				__func__, ttconfig, ttconfig->panel_id);
+			return ttconfig;
+		}
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: mismatching ttconfig:%pK(Panel ID: 0x%02X)\n",
+			__func__, ttconfig, ttconfig->panel_id);
+	}
+
+	return NULL;
+}
+
+static int upgrade_ttconfig_from_platform(struct device *dev)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	struct cyttsp5_touch_config *ttconfig;
+	struct touch_settings *param_regs;
+	int rc = -ENODEV;
+	int upgrade;
+
+	if (!ld->loader_pdata) {
+		dev_info(dev, "%s: No loader platform data\n", __func__);
+		return rc;
+	}
+
+	ttconfig = cyttsp5_get_platform_ttconfig(dev);
+	if (!ttconfig) {
+		dev_info(dev, "%s: No ttconfig data\n", __func__);
+		return rc;
+	}
+
+	param_regs = ttconfig->param_regs;
+	if (!param_regs) {
+		dev_info(dev, "%s: No touch parameters\n", __func__);
+		return rc;
+	}
+
+	if (!param_regs->data || !param_regs->size) {
+		dev_info(dev, "%s: Invalid touch parameters\n", __func__);
+		return rc;
+	}
+
+	if (!ttconfig->fw_ver || !ttconfig->fw_vsize) {
+		dev_info(dev, "%s: Invalid FW version for touch parameters\n",
+			__func__);
+		return rc;
+	}
+
+	upgrade = cyttsp5_check_ttconfig_version_platform(dev, ttconfig);
+	if (upgrade)
+		return cyttsp5_upgrade_ttconfig(dev, param_regs->data,
+				param_regs->size);
+
+	return rc;
+}
+#endif /* CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_TTCONFIG_UPGRADE */
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MANUAL_TTCONFIG_UPGRADE
+static ssize_t cyttsp5_config_data_write(struct file *filp,
+		struct kobject *kobj, struct bin_attribute *bin_attr,
+		char *buf, loff_t offset, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct cyttsp5_loader_data *data = cyttsp5_get_loader_data(dev);
+	u8 *p;
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: offset:%lld count:%zu\n",
+		__func__, offset, count);
+
+	mutex_lock(&data->config_lock);
+
+	if (!data->config_loading) {
+		mutex_unlock(&data->config_lock);
+		return -ENODEV;
+	}
+
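+	/* Grow the staging buffer to cover this chunk and append it; the
+	 * config is streamed from userspace in sequential writes.
+	 */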
+	p = krealloc(data->config_data, offset + count, GFP_KERNEL);
+	if (!p) {
+		kfree(data->config_data);
+		data->config_data = NULL;
+		mutex_unlock(&data->config_lock);
+		return -ENOMEM;
+	}
+	data->config_data = p;
+
+	memcpy(&data->config_data[offset], buf, count);
+	data->config_size += count;
+
+	mutex_unlock(&data->config_lock);
+
+	return count;
+}
+
+static struct bin_attribute bin_attr_config_data = {
+	.attr = {
+		.name = "config_data",
+		.mode = 0200,
+	},
+	.size = 0,
+	.write = cyttsp5_config_data_write,
+};
+
+static ssize_t cyttsp5_config_loading_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	bool config_loading;
+
+	mutex_lock(&ld->config_lock);
+	config_loading = ld->config_loading;
+	mutex_unlock(&ld->config_lock);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", config_loading);
+}
+
+static int cyttsp5_verify_ttconfig_binary(struct device *dev,
+		u8 *bin_config_data, int bin_config_size, u8 **start, int *len)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	int header_size;
+	u16 config_size;
+	u32 fw_ver_config;
+	u32 fw_revctrl_config;
+
+	if (!ld->si) {
+		dev_err(dev,
+			"%s: No firmware information or device FW corrupted\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	/*
+	 * We need 11 bytes for FW version control info and at
+	 * least 6 bytes in config (Length + Max Length + CRC)
+	 */
+	header_size = bin_config_data[0] + 1;
+	if (header_size < 11 || header_size >= bin_config_size - 6) {
+		dev_err(dev, "%s: Invalid header size %d\n", __func__,
+			header_size);
+		return -EINVAL;
+	}
+
+	fw_ver_config = get_unaligned_be16(&bin_config_data[1]);
+	/* 4 middle bytes are not used */
+	fw_revctrl_config = get_unaligned_be32(&bin_config_data[7]);
+
+	/* FW versions should match */
+	if (cyttsp5_check_firmware_version(dev, fw_ver_config,
+			fw_revctrl_config)) {
+		dev_err(dev, "%s: FW versions mismatch\n", __func__);
+		return -EINVAL;
+	}
+
+	config_size = get_unaligned_le16(&bin_config_data[header_size]);
+	/* Perform a simple size check (2 bytes for CRC) */
+	if (config_size != bin_config_size - header_size - 2) {
+		dev_err(dev, "%s: Config size invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	*start = &bin_config_data[header_size];
+	*len = bin_config_size - header_size;
+
+	return 0;
+}
+
+/*
+ * 1: Start loading TT Config
+ * 0: End loading TT Config and perform upgrade
+ * -1: Exit loading
+ */
+static ssize_t cyttsp5_config_loading_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	long value;
+	u8 *start;
+	int length;
+	int rc;
+
+	rc = kstrtol(buf, 10, &value);
+	if (rc < 0 || value < -1 || value > 1) {
+		dev_err(dev, "%s: Invalid value\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ld->config_lock);
+
+	if (value == 1)
+		ld->config_loading = true;
+	else if (value == -1)
+		ld->config_loading = false;
+	else if (value == 0 && ld->config_loading) {
+		ld->config_loading = false;
+		if (ld->config_size == 0) {
+			dev_err(dev, "%s: No config data\n", __func__);
+			goto exit_free;
+		}
+
+		rc = cyttsp5_verify_ttconfig_binary(dev,
+				ld->config_data, ld->config_size,
+				&start, &length);
+		if (rc)
+			goto exit_free;
+
+		rc = cyttsp5_upgrade_ttconfig(dev, start, length);
+	}
+
+exit_free:
+	kfree(ld->config_data);
+	ld->config_data = NULL;
+	ld->config_size = 0;
+
+	mutex_unlock(&ld->config_lock);
+
+	if (rc)
+		return rc;
+
+	return size;
+}
+
+static DEVICE_ATTR(config_loading, 0600,
+	cyttsp5_config_loading_show, cyttsp5_config_loading_store);
+#endif /* CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MANUAL_TTCONFIG_UPGRADE */
+
+static void cyttsp5_fw_and_config_upgrade(
+		struct work_struct *fw_and_config_upgrade)
+{
+	struct cyttsp5_loader_data *ld = container_of(fw_and_config_upgrade,
+			struct cyttsp5_loader_data, fw_and_config_upgrade);
+	struct device *dev = ld->dev;
+
+	ld->si = cmd->request_sysinfo(dev);
+	if (!ld->si)
+		dev_err(dev, "%s: Fail get sysinfo pointer from core\n",
+			__func__);
+#if !CYTTSP5_FW_UPGRADE
+	dev_info(dev, "%s: No FW upgrade method selected!\n", __func__);
+#endif
+
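+	/* Try the upgrade sources in priority order: platform FW, then
+	 * built-in binary FW, then platform TT config; each helper returns
+	 * 0 on success, so stop at the first one that succeeds.
+	 */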
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE
+	if (!upgrade_firmware_from_platform(dev, false))
+		return;
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE
+	if (!upgrade_firmware_from_builtin(dev))
+		return;
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_TTCONFIG_UPGRADE
+	if (!upgrade_ttconfig_from_platform(dev))
+		return;
+#endif
+}
+
+#if CYTTSP5_FW_UPGRADE
+static int cyttsp5_fw_upgrade_cb(struct device *dev)
+{
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE
+	if (!upgrade_firmware_from_platform(dev, false))
+		return 1;
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE
+	if (!upgrade_firmware_from_builtin(dev))
+		return 1;
+#endif
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE
+static ssize_t cyttsp5_forced_upgrade_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int rc = upgrade_firmware_from_platform(dev, true);
+
+	if (rc)
+		return rc;
+	return size;
+}
+
+static DEVICE_ATTR(forced_upgrade, 0200,
+	NULL, cyttsp5_forced_upgrade_store);
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE
+static ssize_t cyttsp5_manual_upgrade_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_loader_data *ld = cyttsp5_get_loader_data(dev);
+	int rc;
+
+	if (ld->is_manual_upgrade_enabled)
+		return -EBUSY;
+
+	ld->is_manual_upgrade_enabled = 1;
+
+	rc = upgrade_firmware_from_class(ld->dev);
+
+	if (rc < 0)
+		ld->is_manual_upgrade_enabled = 0;
+
+	return size;
+}
+
+static DEVICE_ATTR(manual_upgrade, 0200,
+	NULL, cyttsp5_manual_upgrade_store);
+#endif
+
+static int cyttsp5_loader_probe(struct device *dev, void **data)
+{
+	struct cyttsp5_loader_data *ld;
+	struct cyttsp5_platform_data *pdata = dev_get_platdata(dev);
+	int rc;
+
+	if (!pdata || !pdata->loader_pdata) {
+		dev_err(dev, "%s: Missing platform data\n", __func__);
+		rc = -ENODEV;
+		goto error_no_pdata;
+	}
+
+	ld = kzalloc(sizeof(*ld), GFP_KERNEL);
+	if (!ld) {
+		rc = -ENOMEM;
+		goto error_alloc_data_failed;
+	}
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE
+	rc = device_create_file(dev, &dev_attr_forced_upgrade);
+	if (rc) {
+		dev_err(dev, "%s: Error, could not create forced_upgrade\n",
+				__func__);
+		goto error_create_forced_upgrade;
+	}
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE
+	rc = device_create_file(dev, &dev_attr_manual_upgrade);
+	if (rc) {
+		dev_err(dev, "%s: Error, could not create manual_upgrade\n",
+				__func__);
+		goto error_create_manual_upgrade;
+	}
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MANUAL_TTCONFIG_UPGRADE
+	rc = device_create_file(dev, &dev_attr_config_loading);
+	if (rc) {
+		dev_err(dev, "%s: Error, could not create config_loading\n",
+				__func__);
+		goto error_create_config_loading;
+	}
+
+	rc = device_create_bin_file(dev, &bin_attr_config_data);
+	if (rc) {
+		dev_err(dev, "%s: Error, could not create config_data\n",
+				__func__);
+		goto error_create_config_data;
+	}
+#endif
+
+	ld->loader_pdata = pdata->loader_pdata;
+	ld->dev = dev;
+	*data = ld;
+
+#if CYTTSP5_FW_UPGRADE
+	init_completion(&ld->int_running);
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE
+	init_completion(&ld->builtin_bin_fw_complete);
+#endif
+	cmd->subscribe_attention(dev, CY_ATTEN_IRQ, CYTTSP5_LOADER_NAME,
+		cyttsp5_loader_attention, CY_MODE_BOOTLOADER);
+
+	cmd->subscribe_attention(dev, CY_ATTEN_LOADER, CYTTSP5_LOADER_NAME,
+		cyttsp5_fw_upgrade_cb, CY_MODE_UNKNOWN);
+#endif
+#if CYTTSP5_FW_UPGRADE || CYTTSP5_TTCONFIG_UPGRADE
+	init_completion(&ld->calibration_complete);
+	INIT_WORK(&ld->calibration_work, cyttsp5_calibrate_idacs);
+#endif
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MANUAL_TTCONFIG_UPGRADE
+	mutex_init(&ld->config_lock);
+#endif
+
+#ifdef UPGRADE_FW_AND_CONFIG_IN_PROBE
+	/* Call FW and config upgrade directly in probe */
+	cyttsp5_fw_and_config_upgrade(&ld->fw_and_config_upgrade);
+#else
+	INIT_WORK(&ld->fw_and_config_upgrade, cyttsp5_fw_and_config_upgrade);
+	schedule_work(&ld->fw_and_config_upgrade);
+#endif
+
+	dev_dbg(dev, "%s: Successful probe %s\n", __func__, dev_name(dev));
+	return 0;
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MANUAL_TTCONFIG_UPGRADE
+error_create_config_data:
+	device_remove_file(dev, &dev_attr_config_loading);
+error_create_config_loading:
+#endif
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE
+	device_remove_file(dev, &dev_attr_manual_upgrade);
+error_create_manual_upgrade:
+#endif
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE
+	device_remove_file(dev, &dev_attr_forced_upgrade);
+error_create_forced_upgrade:
+#endif
+	kfree(ld);
+error_alloc_data_failed:
+error_no_pdata:
+	dev_err(dev, "%s failed.\n", __func__);
+	return rc;
+}
+
+static void cyttsp5_loader_release(struct device *dev, void *data)
+{
+	struct cyttsp5_loader_data *ld = (struct cyttsp5_loader_data *)data;
+
+#if CYTTSP5_FW_UPGRADE
+	cmd->unsubscribe_attention(dev, CY_ATTEN_IRQ, CYTTSP5_LOADER_NAME,
+		cyttsp5_loader_attention, CY_MODE_BOOTLOADER);
+
+	cmd->unsubscribe_attention(dev, CY_ATTEN_LOADER, CYTTSP5_LOADER_NAME,
+		cyttsp5_fw_upgrade_cb, CY_MODE_UNKNOWN);
+#endif
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MANUAL_TTCONFIG_UPGRADE
+	device_remove_bin_file(dev, &bin_attr_config_data);
+	device_remove_file(dev, &dev_attr_config_loading);
+	kfree(ld->config_data);
+#endif
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BINARY_FW_UPGRADE
+	device_remove_file(dev, &dev_attr_manual_upgrade);
+#endif
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE
+	device_remove_file(dev, &dev_attr_forced_upgrade);
+#endif
+	kfree(ld);
+}
+
+static struct cyttsp5_module loader_module = {
+	.name = CYTTSP5_LOADER_NAME,
+	.probe = cyttsp5_loader_probe,
+	.release = cyttsp5_loader_release,
+};
+
+static int __init cyttsp5_loader_init(void)
+{
+	int rc;
+
+	cmd = cyttsp5_get_commands();
+	if (!cmd)
+		return -EINVAL;
+
+	rc = cyttsp5_register_module(&loader_module);
+	if (rc < 0) {
+		pr_err("%s: Error, failed registering module\n",
+			__func__);
+		return rc;
+	}
+
+	pr_info("%s: Parade TTSP FW Loader Driver (Built %s) rc=%d\n",
+		 __func__, CY_DRIVER_VERSION, rc);
+	return 0;
+}
+module_init(cyttsp5_loader_init);
+
+static void __exit cyttsp5_loader_exit(void)
+{
+	cyttsp5_unregister_module(&loader_module);
+}
+module_exit(cyttsp5_loader_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parade TrueTouch(R) Standard Product FW Loader Driver");
+MODULE_AUTHOR("Parade Technologies <ttdrivers@paradetech.com>");
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_mt_common.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_mt_common.c
new file mode 100644
index 0000000..69fe997
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_mt_common.c
@@ -0,0 +1,741 @@
+/*
+ * cyttsp5_mt_common.c
+ * Parade TrueTouch(TM) Standard Product V5 Multi-Touch Reports Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include "cyttsp5_regs.h"
+
+#define CYTTSP5_MT_NAME "cyttsp5_mt"
+
+#define MT_PARAM_SIGNAL(md, sig_ost) PARAM_SIGNAL(md->pdata->frmwrk, sig_ost)
+#define MT_PARAM_MIN(md, sig_ost) PARAM_MIN(md->pdata->frmwrk, sig_ost)
+#define MT_PARAM_MAX(md, sig_ost) PARAM_MAX(md->pdata->frmwrk, sig_ost)
+#define MT_PARAM_FUZZ(md, sig_ost) PARAM_FUZZ(md->pdata->frmwrk, sig_ost)
+#define MT_PARAM_FLAT(md, sig_ost) PARAM_FLAT(md->pdata->frmwrk, sig_ost)
+
+static void cyttsp5_mt_lift_all(struct cyttsp5_mt_data *md)
+{
+	int max = md->si->tch_abs[CY_TCH_T].max;
+
+	if (md->num_prv_rec != 0) {
+		if (md->mt_function.report_slot_liftoff)
+			md->mt_function.report_slot_liftoff(md, max);
+		input_sync(md->input);
+		md->num_prv_rec = 0;
+	}
+}
+
+static void cyttsp5_get_touch_axis(struct cyttsp5_mt_data *md,
+	int *axis, int size, int max, u8 *xy_data, int bofs)
+{
+	int nbyte;
+	int next;
+
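+	/*
+	 * Build the value little-endian from "size" bytes; each byte is
+	 * shifted down by the field's bit offset before being accumulated.
+	 */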
+	for (nbyte = 0, *axis = 0, next = 0; nbyte < size; nbyte++) {
+		parade_debug(md->dev, DEBUG_LEVEL_2,
+			"%s: *axis=%02X(%d) size=%d max=%08X xy_data=%pK",
+			__func__, *axis, *axis, size, max, xy_data);
+		parade_debug(md->dev, DEBUG_LEVEL_2,
+			" xy_data[%d]=%02X(%d) bofs=%d\n",
+			next, xy_data[next], xy_data[next], bofs);
+		*axis = *axis + ((xy_data[next] >> bofs) << (nbyte * 8));
+		next++;
+	}
+
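+	/* max is assumed to be a power of two, so the mask keeps the value in range */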
+	*axis &= max - 1;
+
+	parade_debug(md->dev, DEBUG_LEVEL_2,
+		"%s: *axis=%02X(%d) size=%d max=%08X xy_data=%pK\n",
+		__func__, *axis, *axis, size, max, xy_data);
+	parade_debug(md->dev, DEBUG_LEVEL_2,
+		" xy_data[%d]=%02X(%d)\n",
+		next, xy_data[next], xy_data[next]);
+}
+
+static void cyttsp5_get_touch_hdr(struct cyttsp5_mt_data *md,
+	struct cyttsp5_touch *touch, u8 *xy_mode)
+{
+	struct device *dev = md->dev;
+	struct cyttsp5_sysinfo *si = md->si;
+	enum cyttsp5_tch_hdr hdr;
+
+	for (hdr = CY_TCH_TIME; hdr < CY_TCH_NUM_HDR; hdr++) {
+		if (!si->tch_hdr[hdr].report)
+			continue;
+		cyttsp5_get_touch_axis(md, &touch->hdr[hdr],
+			si->tch_hdr[hdr].size,
+			si->tch_hdr[hdr].max,
+			xy_mode + si->tch_hdr[hdr].ofs,
+			si->tch_hdr[hdr].bofs);
+		parade_debug(dev, DEBUG_LEVEL_2, "%s: get %s=%04X(%d)\n",
+			__func__, cyttsp5_tch_hdr_string[hdr],
+			touch->hdr[hdr], touch->hdr[hdr]);
+	}
+
+	parade_debug(dev, DEBUG_LEVEL_1,
+		"%s: time=%X tch_num=%d lo=%d noise=%d counter=%d\n",
+		__func__,
+		touch->hdr[CY_TCH_TIME],
+		touch->hdr[CY_TCH_NUM],
+		touch->hdr[CY_TCH_LO],
+		touch->hdr[CY_TCH_NOISE],
+		touch->hdr[CY_TCH_COUNTER]);
+}
+
+static void cyttsp5_get_touch_record(struct cyttsp5_mt_data *md,
+	struct cyttsp5_touch *touch, u8 *xy_data)
+{
+	struct device *dev = md->dev;
+	struct cyttsp5_sysinfo *si = md->si;
+	enum cyttsp5_tch_abs abs;
+
+	for (abs = CY_TCH_X; abs < CY_TCH_NUM_ABS; abs++) {
+		if (!si->tch_abs[abs].report)
+			continue;
+		cyttsp5_get_touch_axis(md, &touch->abs[abs],
+			si->tch_abs[abs].size,
+			si->tch_abs[abs].max,
+			xy_data + si->tch_abs[abs].ofs,
+			si->tch_abs[abs].bofs);
+		parade_debug(dev, DEBUG_LEVEL_2, "%s: get %s=%04X(%d)\n",
+			__func__, cyttsp5_tch_abs_string[abs],
+			touch->abs[abs], touch->abs[abs]);
+	}
+}
+
+static void cyttsp5_mt_process_touch(struct cyttsp5_mt_data *md,
+	struct cyttsp5_touch *touch)
+{
+	struct device *dev = md->dev;
+	struct cyttsp5_sysinfo *si = md->si;
+	int tmp;
+	bool flipped;
+
+	/* Orientation is signed */
+	touch->abs[CY_TCH_OR] = (int8_t)touch->abs[CY_TCH_OR];
+
+	if (md->pdata->flags & CY_MT_FLAG_FLIP) {
+		tmp = touch->abs[CY_TCH_X];
+		touch->abs[CY_TCH_X] = touch->abs[CY_TCH_Y];
+		touch->abs[CY_TCH_Y] = tmp;
+		if (touch->abs[CY_TCH_OR] > 0)
+			touch->abs[CY_TCH_OR] =
+				md->or_max - touch->abs[CY_TCH_OR];
+		else
+			touch->abs[CY_TCH_OR] =
+				md->or_min - touch->abs[CY_TCH_OR];
+		flipped = true;
+	} else {
+		flipped = false;
+	}
+
+	if (md->pdata->flags & CY_MT_FLAG_INV_X) {
+		if (flipped)
+			touch->abs[CY_TCH_X] = si->sensing_conf_data.res_y -
+				touch->abs[CY_TCH_X];
+		else
+			touch->abs[CY_TCH_X] = si->sensing_conf_data.res_x -
+				touch->abs[CY_TCH_X];
+		touch->abs[CY_TCH_OR] *= -1;
+	}
+	if (md->pdata->flags & CY_MT_FLAG_INV_Y) {
+		if (flipped)
+			touch->abs[CY_TCH_Y] = si->sensing_conf_data.res_x -
+				touch->abs[CY_TCH_Y];
+		else
+			touch->abs[CY_TCH_Y] = si->sensing_conf_data.res_y -
+				touch->abs[CY_TCH_Y];
+		touch->abs[CY_TCH_OR] *= -1;
+	}
+
+	/* Convert MAJOR/MINOR from mm to resolution */
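+	/* The factor of 100 assumes len_x is reported in 0.01 mm units */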
+	tmp = touch->abs[CY_TCH_MAJ] * 100 * si->sensing_conf_data.res_x;
+	touch->abs[CY_TCH_MAJ] = tmp / si->sensing_conf_data.len_x;
+	tmp = touch->abs[CY_TCH_MIN] * 100 * si->sensing_conf_data.res_x;
+	touch->abs[CY_TCH_MIN] = tmp / si->sensing_conf_data.len_x;
+
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: flip=%s inv-x=%s inv-y=%s x=%04X(%d) y=%04X(%d)\n",
+		__func__, flipped ? "true" : "false",
+		md->pdata->flags & CY_MT_FLAG_INV_X ? "true" : "false",
+		md->pdata->flags & CY_MT_FLAG_INV_Y ? "true" : "false",
+		touch->abs[CY_TCH_X], touch->abs[CY_TCH_X],
+		touch->abs[CY_TCH_Y], touch->abs[CY_TCH_Y]);
+}
+
+static void cyttsp5_report_event(struct cyttsp5_mt_data *md, int event,
+		int value)
+{
+	int sig = MT_PARAM_SIGNAL(md, event);
+
+	if (sig != CY_IGNORE_VALUE)
+		input_report_abs(md->input, sig, value);
+}
+
+static void cyttsp5_get_mt_touches(struct cyttsp5_mt_data *md,
+		struct cyttsp5_touch *tch, int num_cur_tch)
+{
+	struct device *dev = md->dev;
+	struct cyttsp5_sysinfo *si = md->si;
+	int sig;
+	int i, j, t = 0;
+	DECLARE_BITMAP(ids, si->tch_abs[CY_TCH_T].max);
+	int mt_sync_count = 0;
+	u8 *tch_addr;
+
+	bitmap_zero(ids, si->tch_abs[CY_TCH_T].max);
+	memset(tch->abs, 0, sizeof(tch->abs));
+
+	for (i = 0; i < num_cur_tch; i++) {
+		tch_addr = si->xy_data + (i * si->desc.tch_record_size);
+		cyttsp5_get_touch_record(md, tch, tch_addr);
+
+		/*  Discard proximity event */
+		if (tch->abs[CY_TCH_O] == CY_OBJ_PROXIMITY) {
+			parade_debug(dev, DEBUG_LEVEL_2,
+				"%s: Discarding proximity event\n",
+				__func__);
+			continue;
+		}
+
+		/* Validate track_id */
+		t = tch->abs[CY_TCH_T];
+		if (t < md->t_min || t > md->t_max) {
+			dev_err(dev, "%s: tch=%d -> bad trk_id=%d max_id=%d\n",
+				__func__, i, t, md->t_max);
+			if (md->mt_function.input_sync)
+				md->mt_function.input_sync(md->input);
+			mt_sync_count++;
+			continue;
+		}
+
+		/* Lift-off */
+		if (tch->abs[CY_TCH_E] == CY_EV_LIFTOFF) {
+			parade_debug(dev, DEBUG_LEVEL_1,
+				"%s: t=%d e=%d lift-off\n",
+				__func__, t, tch->abs[CY_TCH_E]);
+			goto cyttsp5_get_mt_touches_pr_tch;
+		}
+
+		/* Process touch */
+		cyttsp5_mt_process_touch(md, tch);
+
+		/* use 0 based track id's */
+		t -= md->t_min;
+
+		sig = MT_PARAM_SIGNAL(md, CY_ABS_ID_OST);
+		if (sig != CY_IGNORE_VALUE) {
+			if (md->mt_function.input_report)
+				md->mt_function.input_report(md->input, sig,
+						t, tch->abs[CY_TCH_O]);
+			__set_bit(t, ids);
+		}
+
+		/* If touch type is hover, send P as distance, reset P */
+		if (tch->abs[CY_TCH_O] == CY_OBJ_HOVER) {
+			/* CY_ABS_D_OST signal must be in touch framework */
+			cyttsp5_report_event(md, CY_ABS_D_OST,
+					tch->abs[CY_TCH_P]);
+			tch->abs[CY_TCH_P] = 0;
+		} else {
+			cyttsp5_report_event(md, CY_ABS_D_OST, 0);
+		}
+
+		/* all devices: position and pressure fields */
+		for (j = 0; j <= CY_ABS_W_OST; j++) {
+			if (!si->tch_abs[j].report)
+				continue;
+			cyttsp5_report_event(md, CY_ABS_X_OST + j,
+					tch->abs[CY_TCH_X + j]);
+		}
+
+		/* Get the extended touch fields */
+		for (j = 0; j < CY_NUM_EXT_TCH_FIELDS; j++) {
+			if (!si->tch_abs[CY_ABS_MAJ_OST + j].report)
+				continue;
+			cyttsp5_report_event(md, CY_ABS_MAJ_OST + j,
+					tch->abs[CY_TCH_MAJ + j]);
+		}
+		if (md->mt_function.input_sync)
+			md->mt_function.input_sync(md->input);
+		mt_sync_count++;
+
+		input_report_key(md->input, BTN_TOUCH, 1);
+cyttsp5_get_mt_touches_pr_tch:
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"%s: t=%d x=%d y=%d z=%d M=%d m=%d",
+			__func__, t,
+			tch->abs[CY_TCH_X],
+			tch->abs[CY_TCH_Y],
+			tch->abs[CY_TCH_P],
+			tch->abs[CY_TCH_MAJ],
+			tch->abs[CY_TCH_MIN]);
+		parade_debug(dev, DEBUG_LEVEL_1,
+			"o=%d e=%d obj=%d tip=%d\n",
+			tch->abs[CY_TCH_OR],
+			tch->abs[CY_TCH_E],
+			tch->abs[CY_TCH_O],
+			tch->abs[CY_TCH_TIP]);
+	}
+
+	if (md->mt_function.final_sync)
+		md->mt_function.final_sync(md->input,
+				si->tch_abs[CY_TCH_T].max, mt_sync_count, ids);
+
+	md->num_prv_rec = num_cur_tch;
+}
+
+/* read xy_data for all current touches */
+static int cyttsp5_xy_worker(struct cyttsp5_mt_data *md)
+{
+	struct device *dev = md->dev;
+	struct cyttsp5_sysinfo *si = md->si;
+	int max_tch = si->sensing_conf_data.max_tch;
+	struct cyttsp5_touch tch;
+	u8 num_cur_tch;
+	int rc = 0;
+
+	cyttsp5_get_touch_hdr(md, &tch, si->xy_mode + 3);
+
+	num_cur_tch = tch.hdr[CY_TCH_NUM];
+	if (num_cur_tch > max_tch) {
+		dev_err(dev, "%s: Num touch err detected (n=%d)\n",
+			__func__, num_cur_tch);
+		num_cur_tch = max_tch;
+	}
+
+	if (tch.hdr[CY_TCH_LO]) {
+		parade_debug(dev, DEBUG_LEVEL_1, "%s: Large area detected\n",
+			__func__);
+		if (md->pdata->flags & CY_MT_FLAG_NO_TOUCH_ON_LO)
+			num_cur_tch = 0;
+	}
+
+	if (num_cur_tch == 0 && md->num_prv_rec == 0)
+		goto cyttsp5_xy_worker_exit;
+
+	/* extract xy_data for all currently reported touches */
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: extract data num_cur_tch=%d\n",
+		__func__, num_cur_tch);
+	if (num_cur_tch) {
+		cyttsp5_get_mt_touches(md, &tch, num_cur_tch);
+	} else {
+		input_report_key(md->input, BTN_TOUCH, 0);
+		cyttsp5_mt_lift_all(md);
+	}
+
+	rc = 0;
+
+cyttsp5_xy_worker_exit:
+	return rc;
+}
+
+static void cyttsp5_mt_send_dummy_event(struct cyttsp5_core_data *cd,
+		struct cyttsp5_mt_data *md)
+{
+	u8 key_value = 0;
+
+	parade_debug(cd->dev, DEBUG_LEVEL_1, "%s: report_id:%X\n",
+		__func__, cd->input_buf[2]);
+
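+	/* Map the reported wake gesture to a key code for userspace */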
+	switch (cd->input_buf[3]) {
+	case GESTURE_DOUBLE_TAP:
+		key_value = KEY_F1;
+		break;
+	case GESTURE_TWO_FINGERS_SLIDE:
+		key_value = KEY_F2;
+		break;
+	case GESTURE_TOUCH_DETECTED:
+	case GESTURE_PUSH_BUTTON:
+		key_value = KEY_POWER;
+		break;
+	case GESTURE_SINGLE_SLIDE_DE_TX:
+		key_value = KEY_F5;
+		break;
+	case GESTURE_SINGLE_SLIDE_IN_TX:
+		key_value = KEY_F6;
+		break;
+	case GESTURE_SINGLE_SLIDE_DE_RX:
+		key_value = KEY_F7;
+		break;
+	case GESTURE_SINGLE_SLIDE_IN_RX:
+		key_value = KEY_F8;
+		break;
+	case GESTURE_SINGLE_QUICK_CALL:
+		/* key_value = KEY_QUICKCALL; */
+		key_value = KEY_POWER;
+		break;
+	case 0xAA:
+		key_value = KEY_POWER;
+		break;
+	default:
+		break;
+	}
+
+	/* Unrecognized gestures leave key_value at 0; nothing to report */
+	if (!key_value)
+		return;
+
+	input_report_key(md->input, key_value, 1);
+	input_sync(md->input);
+
+	input_report_key(md->input, key_value, 0);
+	input_sync(md->input);
+}
+
+static int cyttsp5_mt_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+	int rc;
+
+	if (md->si->xy_mode[2] != md->si->desc.tch_report_id)
+		return 0;
+
+	/* core handles handshake */
+	mutex_lock(&md->mt_lock);
+	rc = cyttsp5_xy_worker(md);
+	mutex_unlock(&md->mt_lock);
+	if (rc < 0)
+		dev_err(dev, "%s: xy_worker error r=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static int cyttsp5_mt_wake_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+
+	mutex_lock(&md->mt_lock);
+	cyttsp5_mt_send_dummy_event(cd, md);
+	mutex_unlock(&md->mt_lock);
+	return 0;
+}
+
+static int cyttsp5_startup_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+
+	mutex_lock(&md->mt_lock);
+	cyttsp5_mt_lift_all(md);
+	mutex_unlock(&md->mt_lock);
+
+	return 0;
+}
+
+static int cyttsp5_mt_suspend_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+
+	mutex_lock(&md->mt_lock);
+	cyttsp5_mt_lift_all(md);
+	md->is_suspended = true;
+	mutex_unlock(&md->mt_lock);
+
+	pm_runtime_put(dev);
+
+	return 0;
+}
+
+static int cyttsp5_mt_resume_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+
+	pm_runtime_get(dev);
+
+	mutex_lock(&md->mt_lock);
+	md->is_suspended = false;
+	mutex_unlock(&md->mt_lock);
+
+	return 0;
+}
+
+static int cyttsp5_mt_open(struct input_dev *input)
+{
+	struct device *dev = input->dev.parent;
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+
+	pm_runtime_get_sync(dev);
+
+	mutex_lock(&md->mt_lock);
+	md->is_suspended = false;
+	mutex_unlock(&md->mt_lock);
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: setup subscriptions\n", __func__);
+
+	/* set up touch call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_IRQ, CYTTSP5_MT_NAME,
+		cyttsp5_mt_attention, CY_MODE_OPERATIONAL);
+
+	/* set up startup call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_STARTUP, CYTTSP5_MT_NAME,
+		cyttsp5_startup_attention, 0);
+
+	/* set up wakeup call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_WAKE, CYTTSP5_MT_NAME,
+		cyttsp5_mt_wake_attention, 0);
+
+	/* set up suspend call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_SUSPEND, CYTTSP5_MT_NAME,
+		cyttsp5_mt_suspend_attention, 0);
+
+	/* set up resume call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_RESUME, CYTTSP5_MT_NAME,
+		cyttsp5_mt_resume_attention, 0);
+
+	return 0;
+}
+
+static void cyttsp5_mt_close(struct input_dev *input)
+{
+	struct device *dev = input->dev.parent;
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_IRQ, CYTTSP5_MT_NAME,
+		cyttsp5_mt_attention, CY_MODE_OPERATIONAL);
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_STARTUP, CYTTSP5_MT_NAME,
+		cyttsp5_startup_attention, 0);
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_WAKE, CYTTSP5_MT_NAME,
+		cyttsp5_mt_wake_attention, 0);
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_SUSPEND, CYTTSP5_MT_NAME,
+		cyttsp5_mt_suspend_attention, 0);
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_RESUME, CYTTSP5_MT_NAME,
+		cyttsp5_mt_resume_attention, 0);
+
+	mutex_lock(&md->mt_lock);
+	if (!md->is_suspended) {
+		pm_runtime_put(dev);
+		md->is_suspended = true;
+	}
+	mutex_unlock(&md->mt_lock);
+}
+
+static int cyttsp5_setup_input_device(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+	int signal = CY_IGNORE_VALUE;
+	int max_x, max_y, max_p, min, max;
+	int max_x_tmp, max_y_tmp;
+	int i;
+	int rc;
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: Initialize event signals\n",
+		__func__);
+	__set_bit(EV_ABS, md->input->evbit);
+	__set_bit(EV_SYN, md->input->evbit);
+	//__set_bit(EV_REL, md->input->evbit);
+	__set_bit(EV_KEY, md->input->evbit);
+	__set_bit(BTN_TOUCH, md->input->keybit);
+
+#ifdef INPUT_PROP_DIRECT
+	__set_bit(INPUT_PROP_DIRECT, md->input->propbit);
+#endif
+
+	/* If virtualkeys enabled, don't use all screen */
+	if (md->pdata->flags & CY_MT_FLAG_VKEYS) {
+		max_x_tmp = md->pdata->vkeys_x;
+		max_y_tmp = md->pdata->vkeys_y;
+	} else {
+		max_x_tmp = md->si->sensing_conf_data.res_x;
+		max_y_tmp = md->si->sensing_conf_data.res_y;
+	}
+
+	/* get maximum values from the sysinfo data */
+	if (md->pdata->flags & CY_MT_FLAG_FLIP) {
+		max_x = max_y_tmp - 1;
+		max_y = max_x_tmp - 1;
+	} else {
+		max_x = max_x_tmp - 1;
+		max_y = max_y_tmp - 1;
+	}
+	max_p = md->si->sensing_conf_data.max_z;
+
+	/* set event signal capabilities */
+	for (i = 0; i < NUM_SIGNALS(md->pdata->frmwrk); i++) {
+		signal = MT_PARAM_SIGNAL(md, i);
+		if (signal != CY_IGNORE_VALUE) {
+			__set_bit(signal, md->input->absbit);
+
+			min = MT_PARAM_MIN(md, i);
+			max = MT_PARAM_MAX(md, i);
+			if (i == CY_ABS_ID_OST) {
+				/* shift track ids down to start at 0 */
+				max = max - min;
+				min = min - min;
+			} else if (i == CY_ABS_X_OST)
+				max = max_x;
+			else if (i == CY_ABS_Y_OST)
+				max = max_y;
+			else if (i == CY_ABS_P_OST)
+				max = max_p;
+
+			input_set_abs_params(md->input, signal, min, max,
+				MT_PARAM_FUZZ(md, i), MT_PARAM_FLAT(md, i));
+			parade_debug(dev, DEBUG_LEVEL_1,
+				"%s: register signal=%02X min=%d max=%d\n",
+				__func__, signal, min, max);
+		}
+	}
+
+	md->or_min = MT_PARAM_MIN(md, CY_ABS_OR_OST);
+	md->or_max = MT_PARAM_MAX(md, CY_ABS_OR_OST);
+
+	md->t_min = MT_PARAM_MIN(md, CY_ABS_ID_OST);
+	md->t_max = MT_PARAM_MAX(md, CY_ABS_ID_OST);
+
+	/* Set key capabilities before registering so they are visible at registration */
+	input_set_capability(md->input, EV_KEY, KEY_F1);
+	input_set_capability(md->input, EV_KEY, KEY_F2);
+	input_set_capability(md->input, EV_KEY, KEY_POWER);
+	input_set_capability(md->input, EV_KEY, KEY_F5);
+	input_set_capability(md->input, EV_KEY, KEY_F6);
+	input_set_capability(md->input, EV_KEY, KEY_F7);
+	input_set_capability(md->input, EV_KEY, KEY_F8);
+	/* input_set_capability(md->input, EV_KEY, KEY_QUICKCALL); */
+
+	rc = md->mt_function.input_register_device(md->input,
+			md->si->tch_abs[CY_TCH_T].max);
+	if (rc < 0)
+		dev_err(dev, "%s: Error, failed register input device r=%d\n",
+			__func__, rc);
+	else
+		md->input_device_registered = true;
+
+	return rc;
+}
+
+static int cyttsp5_setup_input_attention(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+	int rc;
+
+	md->si = _cyttsp5_request_sysinfo(dev);
+	if (!md->si)
+		return -EINVAL;
+
+	rc = cyttsp5_setup_input_device(dev);
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_STARTUP, CYTTSP5_MT_NAME,
+		cyttsp5_setup_input_attention, 0);
+
+	return rc;
+}
+
+int cyttsp5_mt_probe(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+	struct cyttsp5_platform_data *pdata = dev_get_platdata(dev);
+	struct cyttsp5_mt_platform_data *mt_pdata;
+	int rc = 0;
+
+	if (!pdata || !pdata->mt_pdata) {
+		dev_err(dev, "%s: Missing platform data\n", __func__);
+		rc = -ENODEV;
+		goto error_no_pdata;
+	}
+	mt_pdata = pdata->mt_pdata;
+
+	cyttsp5_init_function_ptrs(md);
+
+	mutex_init(&md->mt_lock);
+	md->dev = dev;
+	md->pdata = mt_pdata;
+
+	/* Create the input device and register it. */
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Create the input device and register it\n", __func__);
+	md->input = input_allocate_device();
+	if (!md->input) {
+		dev_err(dev, "%s: Error, failed to allocate input device\n",
+			__func__);
+		rc = -ENODEV;
+		goto error_alloc_failed;
+	}
+
+	if (md->pdata->inp_dev_name)
+		md->input->name = md->pdata->inp_dev_name;
+	else
+		md->input->name = CYTTSP5_MT_NAME;
+	scnprintf(md->phys, sizeof(md->phys), "%s/input%d", dev_name(dev),
+			cd->phys_num++);
+	md->input->phys = md->phys;
+	md->input->dev.parent = md->dev;
+	md->input->open = cyttsp5_mt_open;
+	md->input->close = cyttsp5_mt_close;
+	input_set_drvdata(md->input, md);
+
+	/* get sysinfo */
+	md->si = _cyttsp5_request_sysinfo(dev);
+
+	if (md->si) {
+		rc = cyttsp5_setup_input_device(dev);
+		if (rc)
+			goto error_init_input;
+	} else {
+		dev_err(dev, "%s: Fail get sysinfo pointer from core p=%pK\n",
+			__func__, md->si);
+		_cyttsp5_subscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_MT_NAME, cyttsp5_setup_input_attention, 0);
+	}
+
+	return 0;
+
+error_init_input:
+	input_free_device(md->input);
+error_alloc_failed:
+error_no_pdata:
+	dev_err(dev, "%s failed.\n", __func__);
+	return rc;
+}
+
+int cyttsp5_mt_release(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_mt_data *md = &cd->md;
+
+	if (md->input_device_registered) {
+		input_unregister_device(md->input);
+	} else {
+		input_free_device(md->input);
+		_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_MT_NAME, cyttsp5_setup_input_attention, 0);
+	}
+
+	return 0;
+}
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_mta.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_mta.c
new file mode 100644
index 0000000..25089db
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_mta.c
@@ -0,0 +1,85 @@
+/*
+ * cyttsp5_mta.c
+ * Parade TrueTouch(TM) Standard Product V5 Multi-Touch Protocol A Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include "cyttsp5_regs.h"
+
+static void cyttsp5_final_sync(struct input_dev *input, int max_slots,
+		int mt_sync_count, unsigned long *ids)
+{
+	if (mt_sync_count)
+		input_sync(input);
+}
+
+static void cyttsp5_input_sync(struct input_dev *input)
+{
+	input_mt_sync(input);
+}
+
+static void cyttsp5_input_report(struct input_dev *input, int sig,
+		int t, int type)
+{
+	if (type == CY_OBJ_STANDARD_FINGER || type == CY_OBJ_GLOVE
+			|| type == CY_OBJ_HOVER) {
+		input_report_key(input, BTN_TOOL_FINGER, CY_BTN_PRESSED);
+		input_report_key(input, BTN_TOOL_PEN, CY_BTN_RELEASED);
+	} else if (type == CY_OBJ_STYLUS) {
+		input_report_key(input, BTN_TOOL_PEN, CY_BTN_PRESSED);
+		input_report_key(input, BTN_TOOL_FINGER, CY_BTN_RELEASED);
+	}
+
+	if (type != CY_OBJ_HOVER)
+		input_report_key(input, BTN_TOUCH, CY_BTN_PRESSED);
+
+	input_report_abs(input, sig, t);
+}
+
+static void cyttsp5_report_slot_liftoff(struct cyttsp5_mt_data *md,
+		int max_slots)
+{
+	input_report_key(md->input, BTN_TOUCH, CY_BTN_RELEASED);
+	input_report_key(md->input, BTN_TOOL_FINGER, CY_BTN_RELEASED);
+	input_report_key(md->input, BTN_TOOL_PEN, CY_BTN_RELEASED);
+}
+
+static int cyttsp5_input_register_device(struct input_dev *input, int max_slots)
+{
+	__set_bit(BTN_TOUCH, input->keybit);
+	__set_bit(BTN_TOOL_FINGER, input->keybit);
+	__set_bit(BTN_TOOL_PEN, input->keybit);
+	return input_register_device(input);
+}
+
+void cyttsp5_init_function_ptrs(struct cyttsp5_mt_data *md)
+{
+	md->mt_function.report_slot_liftoff = cyttsp5_report_slot_liftoff;
+	md->mt_function.final_sync = cyttsp5_final_sync;
+	md->mt_function.input_sync = cyttsp5_input_sync;
+	md->mt_function.input_report = cyttsp5_input_report;
+	md->mt_function.input_register_device = cyttsp5_input_register_device;
+}
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_mtb.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_mtb.c
new file mode 100644
index 0000000..5846d6e
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_mtb.c
@@ -0,0 +1,89 @@
+/*
+ * cyttsp5_mtb.c
+ * Parade TrueTouch(TM) Standard Product V5 Multi-Touch Protocol B Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include <linux/input/mt.h>
+#include <linux/version.h>
+#include "cyttsp5_regs.h"
+
+static void cyttsp5_final_sync(struct input_dev *input, int max_slots,
+		int mt_sync_count, unsigned long *ids)
+{
+	int t;
+
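+	/* Lift any slot that did not receive a contact in this frame */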
+	for (t = 0; t < max_slots; t++) {
+		if (test_bit(t, ids))
+			continue;
+		input_mt_slot(input, t);
+		input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+	}
+
+	input_sync(input);
+}
+
+static void cyttsp5_input_report(struct input_dev *input, int sig,
+		int t, int type)
+{
+	input_mt_slot(input, t);
+
+	if (type == CY_OBJ_STANDARD_FINGER || type == CY_OBJ_GLOVE
+			|| type == CY_OBJ_HOVER)
+		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+	else if (type == CY_OBJ_STYLUS)
+		input_mt_report_slot_state(input, MT_TOOL_PEN, true);
+}
+
+static void cyttsp5_report_slot_liftoff(struct cyttsp5_mt_data *md,
+		int max_slots)
+{
+	int t;
+
+	if (md->num_prv_rec == 0)
+		return;
+
+	for (t = 0; t < max_slots; t++) {
+		input_mt_slot(md->input, t);
+		input_mt_report_slot_state(md->input, MT_TOOL_FINGER, false);
+	}
+}
+
+static int cyttsp5_input_register_device(struct input_dev *input, int max_slots)
+{
+	input_mt_init_slots(input, max_slots, 0);
+
+	return input_register_device(input);
+}
+
+void cyttsp5_init_function_ptrs(struct cyttsp5_mt_data *md)
+{
+	md->mt_function.report_slot_liftoff = cyttsp5_report_slot_liftoff;
+	md->mt_function.final_sync = cyttsp5_final_sync;
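+	/* Protocol B tracks contacts by slot, so no per-contact MT sync is needed */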
+	md->mt_function.input_sync = NULL;
+	md->mt_function.input_report = cyttsp5_input_report;
+	md->mt_function.input_register_device = cyttsp5_input_register_device;
+}
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_platform.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_platform.c
new file mode 100644
index 0000000..9e30535
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_platform.c
@@ -0,0 +1,287 @@
+/*
+ * cyttsp5_platform.c
+ * Parade TrueTouch(TM) Standard Product V5 Platform Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2013-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include "cyttsp5_regs.h"
+#include <linux/input/cyttsp5_platform.h>
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE
+/* FW for Panel ID = 0x00 */
+#include "cyttsp5_fw_pid00.h"
+static struct cyttsp5_touch_firmware cyttsp5_firmware_pid00 = {
+	.img = cyttsp4_img_pid00,
+	.size = ARRAY_SIZE(cyttsp4_img_pid00),
+	.ver = cyttsp4_ver_pid00,
+	.vsize = ARRAY_SIZE(cyttsp4_ver_pid00),
+	.panel_id = 0x00,
+};
+
+/* FW for Panel ID = 0x01 */
+#include "cyttsp5_fw_pid01.h"
+static struct cyttsp5_touch_firmware cyttsp5_firmware_pid01 = {
+	.img = cyttsp4_img_pid01,
+	.size = ARRAY_SIZE(cyttsp4_img_pid01),
+	.ver = cyttsp4_ver_pid01,
+	.vsize = ARRAY_SIZE(cyttsp4_ver_pid01),
+	.panel_id = 0x01,
+};
+
+/* FW for Panel ID not enabled (legacy) */
+#include "cyttsp5_fw.h"
+static struct cyttsp5_touch_firmware cyttsp5_firmware = {
+	.img = cyttsp4_img,
+	.size = ARRAY_SIZE(cyttsp4_img),
+	.ver = cyttsp4_ver,
+	.vsize = ARRAY_SIZE(cyttsp4_ver),
+};
+#else
+/* FW for Panel ID not enabled (legacy) */
+static struct cyttsp5_touch_firmware cyttsp5_firmware = {
+	.img = NULL,
+	.size = 0,
+	.ver = NULL,
+	.vsize = 0,
+};
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_TTCONFIG_UPGRADE
+/* TT Config for Panel ID = 0x00 */
+#include "cyttsp5_params_pid00.h"
+static struct touch_settings cyttsp5_sett_param_regs_pid00 = {
+	.data = (uint8_t *)&cyttsp4_param_regs_pid00[0],
+	.size = ARRAY_SIZE(cyttsp4_param_regs_pid00),
+	.tag = 0,
+};
+
+static struct touch_settings cyttsp5_sett_param_size_pid00 = {
+	.data = (uint8_t *)&cyttsp4_param_size_pid00[0],
+	.size = ARRAY_SIZE(cyttsp4_param_size_pid00),
+	.tag = 0,
+};
+
+static struct cyttsp5_touch_config cyttsp5_ttconfig_pid00 = {
+	.param_regs = &cyttsp5_sett_param_regs_pid00,
+	.param_size = &cyttsp5_sett_param_size_pid00,
+	.fw_ver = ttconfig_fw_ver_pid00,
+	.fw_vsize = ARRAY_SIZE(ttconfig_fw_ver_pid00),
+	.panel_id = 0x00,
+};
+
+/* TT Config for Panel ID = 0x01 */
+#include "cyttsp5_params_pid01.h"
+static struct touch_settings cyttsp5_sett_param_regs_pid01 = {
+	.data = (uint8_t *)&cyttsp4_param_regs_pid01[0],
+	.size = ARRAY_SIZE(cyttsp4_param_regs_pid01),
+	.tag = 0,
+};
+
+static struct touch_settings cyttsp5_sett_param_size_pid01 = {
+	.data = (uint8_t *)&cyttsp4_param_size_pid01[0],
+	.size = ARRAY_SIZE(cyttsp4_param_size_pid01),
+	.tag = 0,
+};
+
+static struct cyttsp5_touch_config cyttsp5_ttconfig_pid01 = {
+	.param_regs = &cyttsp5_sett_param_regs_pid01,
+	.param_size = &cyttsp5_sett_param_size_pid01,
+	.fw_ver = ttconfig_fw_ver_pid01,
+	.fw_vsize = ARRAY_SIZE(ttconfig_fw_ver_pid01),
+	.panel_id = 0x01,
+};
+
+/* TT Config for Panel ID not enabled (legacy)*/
+#include "cyttsp5_params.h"
+static struct touch_settings cyttsp5_sett_param_regs = {
+	.data = (uint8_t *)&cyttsp4_param_regs[0],
+	.size = ARRAY_SIZE(cyttsp4_param_regs),
+	.tag = 0,
+};
+
+static struct touch_settings cyttsp5_sett_param_size = {
+	.data = (uint8_t *)&cyttsp4_param_size[0],
+	.size = ARRAY_SIZE(cyttsp4_param_size),
+	.tag = 0,
+};
+
+static struct cyttsp5_touch_config cyttsp5_ttconfig = {
+	.param_regs = &cyttsp5_sett_param_regs,
+	.param_size = &cyttsp5_sett_param_size,
+	.fw_ver = ttconfig_fw_ver,
+	.fw_vsize = ARRAY_SIZE(ttconfig_fw_ver),
+};
+#else
+/* TT Config for Panel ID not enabled (legacy)*/
+static struct cyttsp5_touch_config cyttsp5_ttconfig = {
+	.param_regs = NULL,
+	.param_size = NULL,
+	.fw_ver = NULL,
+	.fw_vsize = 0,
+};
+#endif
+
+static struct cyttsp5_touch_firmware *cyttsp5_firmwares[] = {
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_FW_UPGRADE
+	&cyttsp5_firmware_pid00,
+	&cyttsp5_firmware_pid01,
+#endif
+	NULL, /* Last item should always be NULL */
+};
+
+static struct cyttsp5_touch_config *cyttsp5_ttconfigs[] = {
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PLATFORM_TTCONFIG_UPGRADE
+	&cyttsp5_ttconfig_pid00,
+	&cyttsp5_ttconfig_pid01,
+#endif
+	NULL, /* Last item should always be NULL */
+};
+
+struct cyttsp5_loader_platform_data _cyttsp5_loader_platform_data = {
+	.fw = &cyttsp5_firmware,
+	.ttconfig = &cyttsp5_ttconfig,
+	.fws = cyttsp5_firmwares,
+	.ttconfigs = cyttsp5_ttconfigs,
+	.flags = CY_LOADER_FLAG_NONE,
+};
+
+int cyttsp5_xres(struct cyttsp5_core_platform_data *pdata,
+		struct device *dev)
+{
+	int rst_gpio = pdata->rst_gpio;
+	int rc = 0;
+
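+	/* Pulse the reset line: high 20 ms, low 40 ms, then high to finish */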
+	gpio_set_value(rst_gpio, 1);
+	msleep(20);
+	gpio_set_value(rst_gpio, 0);
+	msleep(40);
+	gpio_set_value(rst_gpio, 1);
+	msleep(20);
+	dev_info(dev,
+		"%s: RESET CYTTSP gpio=%d r=%d\n", __func__,
+		pdata->rst_gpio, rc);
+	return rc;
+}
+
+int cyttsp5_init(struct cyttsp5_core_platform_data *pdata,
+		int on, struct device *dev)
+{
+	int rst_gpio = pdata->rst_gpio;
+	int irq_gpio = pdata->irq_gpio;
+	int rc = 0;
+
+	if (on) {
+		rc = gpio_request(rst_gpio, NULL);
+		if (rc < 0) {
+			gpio_free(rst_gpio);
+			rc = gpio_request(rst_gpio, NULL);
+		}
+		if (rc < 0)
+			dev_err(dev, "%s: Fail request gpio=%d\n",
+				__func__, rst_gpio);
+		else {
+			rc = gpio_direction_output(rst_gpio, 1);
+			if (rc < 0) {
+				pr_err("%s: Fail set output gpio=%d\n",
+					__func__, rst_gpio);
+				gpio_free(rst_gpio);
+			} else {
+				rc = gpio_request(irq_gpio, NULL);
+				if (rc < 0) {
+					gpio_free(irq_gpio);
+					rc = gpio_request(irq_gpio, NULL);
+				}
+				if (rc < 0) {
+					dev_err(dev,
+						"%s: Fail request gpio=%d\n",
+						__func__, irq_gpio);
+					gpio_free(rst_gpio);
+				} else {
+					gpio_direction_input(irq_gpio);
+				}
+			}
+		}
+	} else {
+		gpio_free(rst_gpio);
+		gpio_free(irq_gpio);
+	}
+
+	dev_info(dev, "%s: INIT CYTTSP RST gpio=%d and IRQ gpio=%d r=%d\n",
+		__func__, rst_gpio, irq_gpio, rc);
+	return rc;
+}
+
+static int cyttsp5_wakeup(struct cyttsp5_core_platform_data *pdata,
+		struct device *dev, atomic_t *ignore_irq)
+{
+	return 0;
+}
+
+static int cyttsp5_sleep(struct cyttsp5_core_platform_data *pdata,
+		struct device *dev, atomic_t *ignore_irq)
+{
+	return 0;
+}
+
+int cyttsp5_power(struct cyttsp5_core_platform_data *pdata,
+		int on, struct device *dev, atomic_t *ignore_irq)
+{
+	if (on)
+		return cyttsp5_wakeup(pdata, dev, ignore_irq);
+
+	return cyttsp5_sleep(pdata, dev, ignore_irq);
+}
+
+int cyttsp5_irq_stat(struct cyttsp5_core_platform_data *pdata,
+		struct device *dev)
+{
+	return gpio_get_value(pdata->irq_gpio);
+}
+
+#ifdef CYTTSP5_DETECT_HW
+int cyttsp5_detect(struct cyttsp5_core_platform_data *pdata,
+		struct device *dev, cyttsp5_platform_read read)
+{
+	int retry = 3;
+	int rc;
+	char buf[1];
+
+	while (retry--) {
+		/* Perform reset, wait for 100 ms and perform read */
+		parade_debug(dev, DEBUG_LEVEL_2, "%s: Performing a reset\n",
+			__func__);
+		pdata->xres(pdata, dev);
+		msleep(100);
+		rc = read(dev, buf, 1);
+		if (!rc)
+			return 0;
+
+		parade_debug(dev, DEBUG_LEVEL_2,
+			"%s: Read unsuccessful, try=%d\n", __func__, 3 - retry);
+	}
+
+	return rc;
+}
+#endif
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_proximity.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_proximity.c
new file mode 100644
index 0000000..8704b03
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_proximity.c
@@ -0,0 +1,562 @@
+/*
+ * cyttsp5_proximity.c
+ * Parade TrueTouch(TM) Standard Product V5 Proximity Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2013-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include "cyttsp5_regs.h"
+
+#define CYTTSP5_PROXIMITY_NAME "cyttsp5_proximity"
+
+/* Timeout value in ms. */
+#define CYTTSP5_PROXIMITY_REQUEST_EXCLUSIVE_TIMEOUT 1000
+
+#define CYTTSP5_PROXIMITY_ON 0
+#define CYTTSP5_PROXIMITY_OFF 1
+
+static inline struct cyttsp5_proximity_data *get_prox_data(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	return &cd->pd;
+}
+
+static void cyttsp5_report_proximity(struct cyttsp5_proximity_data *pd,
+	bool on)
+{
+	int val = on ? CYTTSP5_PROXIMITY_ON : CYTTSP5_PROXIMITY_OFF;
+
+	input_report_abs(pd->input, ABS_DISTANCE, val);
+	input_sync(pd->input);
+}
+
+static void cyttsp5_get_touch_axis(struct cyttsp5_proximity_data *pd,
+	int *axis, int size, int max, u8 *xy_data, int bofs)
+{
+	int nbyte;
+	int next;
+
+	for (nbyte = 0, *axis = 0, next = 0; nbyte < size; nbyte++) {
+		parade_debug(pd->dev, DEBUG_LEVEL_2,
+			"%s: *axis=%02X(%d) size=%d max=%08X xy_data=%pK",
+			__func__, *axis, *axis, size, max, xy_data);
+		parade_debug(pd->dev, DEBUG_LEVEL_2,
+			" xy_data[%d]=%02X(%d) bofs=%d\n",
+			next, xy_data[next], xy_data[next], bofs);
+		*axis = *axis + ((xy_data[next] >> bofs) << (nbyte * 8));
+		next++;
+	}
+
+	*axis &= max - 1;
+
+	parade_debug(pd->dev, DEBUG_LEVEL_2,
+		"%s: *axis=%02X(%d) size=%d max=%08X xy_data=%pK\n",
+		__func__, *axis, *axis, size, max, xy_data);
+	parade_debug(pd->dev, DEBUG_LEVEL_2,
+		" xy_data[%d]=%02X(%d)\n",
+		next, xy_data[next], xy_data[next]);
+}
+
+static void cyttsp5_get_touch_hdr(struct cyttsp5_proximity_data *pd,
+	struct cyttsp5_touch *touch, u8 *xy_mode)
+{
+	struct device *dev = pd->dev;
+	struct cyttsp5_sysinfo *si = pd->si;
+	enum cyttsp5_tch_hdr hdr;
+
+	for (hdr = CY_TCH_TIME; hdr < CY_TCH_NUM_HDR; hdr++) {
+		if (!si->tch_hdr[hdr].report)
+			continue;
+		cyttsp5_get_touch_axis(pd, &touch->hdr[hdr],
+			si->tch_hdr[hdr].size,
+			si->tch_hdr[hdr].max,
+			xy_mode + si->tch_hdr[hdr].ofs,
+			si->tch_hdr[hdr].bofs);
+		parade_debug(dev, DEBUG_LEVEL_2, "%s: get %s=%04X(%d)\n",
+			__func__, cyttsp5_tch_hdr_string[hdr],
+			touch->hdr[hdr], touch->hdr[hdr]);
+	}
+}
+
+static void cyttsp5_get_touch(struct cyttsp5_proximity_data *pd,
+	struct cyttsp5_touch *touch, u8 *xy_data)
+{
+	struct device *dev = pd->dev;
+	struct cyttsp5_sysinfo *si = pd->si;
+	enum cyttsp5_tch_abs abs;
+
+	for (abs = CY_TCH_X; abs < CY_TCH_NUM_ABS; abs++) {
+		if (!si->tch_abs[abs].report)
+			continue;
+		cyttsp5_get_touch_axis(pd, &touch->abs[abs],
+			si->tch_abs[abs].size,
+			si->tch_abs[abs].max,
+			xy_data + si->tch_abs[abs].ofs,
+			si->tch_abs[abs].bofs);
+		parade_debug(dev, DEBUG_LEVEL_2, "%s: get %s=%04X(%d)\n",
+			__func__, cyttsp5_tch_abs_string[abs],
+			touch->abs[abs], touch->abs[abs]);
+	}
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: x=%04X(%d) y=%04X(%d)\n",
+		__func__, touch->abs[CY_TCH_X], touch->abs[CY_TCH_X],
+		touch->abs[CY_TCH_Y], touch->abs[CY_TCH_Y]);
+}
+
+static void cyttsp5_get_proximity_touch(struct cyttsp5_proximity_data *pd,
+		struct cyttsp5_touch *tch, int num_cur_tch)
+{
+	struct cyttsp5_sysinfo *si = pd->si;
+	int i;
+
+	for (i = 0; i < num_cur_tch; i++) {
+		cyttsp5_get_touch(pd, tch, si->xy_data +
+			(i * si->desc.tch_record_size));
+
+		/* Check for proximity event */
+		if (tch->abs[CY_TCH_O] == CY_OBJ_PROXIMITY) {
+			if (tch->abs[CY_TCH_E] == CY_EV_TOUCHDOWN)
+				cyttsp5_report_proximity(pd, true);
+			else if (tch->abs[CY_TCH_E] == CY_EV_LIFTOFF)
+				cyttsp5_report_proximity(pd, false);
+			break;
+		}
+	}
+}
+
+/* read xy_data for all current touches */
+static int cyttsp5_xy_worker(struct cyttsp5_proximity_data *pd)
+{
+	struct device *dev = pd->dev;
+	struct cyttsp5_sysinfo *si = pd->si;
+	struct cyttsp5_touch tch;
+	u8 num_cur_tch;
+
+	cyttsp5_get_touch_hdr(pd, &tch, si->xy_mode + 3);
+
+	num_cur_tch = tch.hdr[CY_TCH_NUM];
+	if (num_cur_tch > si->sensing_conf_data.max_tch) {
+		dev_err(dev, "%s: Num touch err detected (n=%d)\n",
+			__func__, num_cur_tch);
+		num_cur_tch = si->sensing_conf_data.max_tch;
+	}
+
+	if (tch.hdr[CY_TCH_LO])
+		parade_debug(dev, DEBUG_LEVEL_1, "%s: Large area detected\n",
+			__func__);
+
+	/* extract xy_data for all currently reported touches */
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: extract data num_cur_tch=%d\n",
+		__func__, num_cur_tch);
+	if (num_cur_tch)
+		cyttsp5_get_proximity_touch(pd, &tch, num_cur_tch);
+	else
+		cyttsp5_report_proximity(pd, false);
+
+	return 0;
+}
+
+static int cyttsp5_proximity_attention(struct device *dev)
+{
+	struct cyttsp5_proximity_data *pd = get_prox_data(dev);
+	int rc = 0;
+
+	if (pd->si->xy_mode[2] != pd->si->desc.tch_report_id)
+		return 0;
+
+	mutex_lock(&pd->prox_lock);
+	rc = cyttsp5_xy_worker(pd);
+	mutex_unlock(&pd->prox_lock);
+	if (rc < 0)
+		dev_err(dev, "%s: xy_worker error r=%d\n", __func__, rc);
+
+	return rc;
+}
+
+static int cyttsp5_startup_attention(struct device *dev)
+{
+	struct cyttsp5_proximity_data *pd = get_prox_data(dev);
+
+	mutex_lock(&pd->prox_lock);
+	cyttsp5_report_proximity(pd, false);
+	mutex_unlock(&pd->prox_lock);
+
+	return 0;
+}
+
+static int _cyttsp5_set_proximity_via_touchmode_enabled(
+		struct cyttsp5_proximity_data *pd, bool enable)
+{
+	struct device *dev = pd->dev;
+	u32 touchmode_enabled;
+	int rc;
+
+	rc = cyttsp5_request_nonhid_get_param(dev, 0,
+			CY_RAM_ID_TOUCHMODE_ENABLED, &touchmode_enabled);
+	if (rc)
+		return rc;
+
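+	/* Bit 7 of TOUCHMODE_ENABLED appears to gate proximity reporting */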
+	if (enable)
+		touchmode_enabled |= 0x80;
+	else
+		touchmode_enabled &= 0x7F;
+
+	rc = cyttsp5_request_nonhid_set_param(dev, 0,
+			CY_RAM_ID_TOUCHMODE_ENABLED, touchmode_enabled,
+			CY_RAM_ID_TOUCHMODE_ENABLED_SIZE);
+
+	return rc;
+}
+
+static int _cyttsp5_set_proximity_via_proximity_enable(
+		struct cyttsp5_proximity_data *pd, bool enable)
+{
+	struct device *dev = pd->dev;
+	u32 proximity_enable;
+	int rc;
+
+	rc = cyttsp5_request_nonhid_get_param(dev, 0,
+			CY_RAM_ID_PROXIMITY_ENABLE, &proximity_enable);
+	if (rc)
+		return rc;
+
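+	/* Bit 0 of PROXIMITY_ENABLE toggles proximity scanning */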
+	if (enable)
+		proximity_enable |= 0x01;
+	else
+		proximity_enable &= 0xFE;
+
+	rc = cyttsp5_request_nonhid_set_param(dev, 0,
+			CY_RAM_ID_PROXIMITY_ENABLE, proximity_enable,
+			CY_RAM_ID_PROXIMITY_ENABLE_SIZE);
+
+	return rc;
+}
+
+static int _cyttsp5_set_proximity(struct cyttsp5_proximity_data *pd,
+		bool enable)
+{
+	if (!IS_PIP_VER_GE(pd->si, 1, 4))
+		return _cyttsp5_set_proximity_via_touchmode_enabled(pd,
+				enable);
+
+	return _cyttsp5_set_proximity_via_proximity_enable(pd, enable);
+}
+
+static int _cyttsp5_proximity_enable(struct cyttsp5_proximity_data *pd)
+{
+	struct device *dev = pd->dev;
+	int rc = 0;
+
+	pm_runtime_get_sync(dev);
+
+	rc = cyttsp5_request_exclusive(dev,
+			CYTTSP5_PROXIMITY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on request exclusive r=%d\n",
+				__func__, rc);
+		goto exit;
+	}
+
+	rc = _cyttsp5_set_proximity(pd, true);
+	if (rc < 0) {
+		dev_err(dev,
+			"%s: Error on request enable proximity scantype r=%d\n",
+			__func__, rc);
+		goto exit_release;
+	}
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: setup subscriptions\n", __func__);
+
+	/* set up touch call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_IRQ, CYTTSP5_PROXIMITY_NAME,
+		cyttsp5_proximity_attention, CY_MODE_OPERATIONAL);
+
+	/* set up startup call back */
+	_cyttsp5_subscribe_attention(dev, CY_ATTEN_STARTUP,
+		CYTTSP5_PROXIMITY_NAME, cyttsp5_startup_attention, 0);
+
+exit_release:
+	cyttsp5_release_exclusive(dev);
+exit:
+	return rc;
+}
+
+static int _cyttsp5_proximity_disable(struct cyttsp5_proximity_data *pd,
+		bool force)
+{
+	struct device *dev = pd->dev;
+	int rc = 0;
+
+	rc = cyttsp5_request_exclusive(dev,
+			CYTTSP5_PROXIMITY_REQUEST_EXCLUSIVE_TIMEOUT);
+	if (rc < 0) {
+		dev_err(dev, "%s: Error on request exclusive r=%d\n",
+				__func__, rc);
+		goto exit;
+	}
+
+	rc = _cyttsp5_set_proximity(pd, false);
+	if (rc < 0) {
+		dev_err(dev,
+			"%s: Error on request disable proximity scan r=%d\n",
+			__func__, rc);
+		goto exit_release;
+	}
+
+exit_release:
+	cyttsp5_release_exclusive(dev);
+
+exit:
+	if (!rc || force) {
+		_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_IRQ,
+			CYTTSP5_PROXIMITY_NAME, cyttsp5_proximity_attention,
+			CY_MODE_OPERATIONAL);
+
+		_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_PROXIMITY_NAME, cyttsp5_startup_attention, 0);
+	}
+
+	pm_runtime_put(dev);
+
+	return rc;
+}
+
+static ssize_t cyttsp5_proximity_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyttsp5_proximity_data *pd = get_prox_data(dev);
+	int val = 0;
+
+	mutex_lock(&pd->sysfs_lock);
+	val = pd->enable_count;
+	mutex_unlock(&pd->sysfs_lock);
+
+	return scnprintf(buf, CY_MAX_PRBUF_SIZE, "%d\n", val);
+}
+
+static ssize_t cyttsp5_proximity_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct cyttsp5_proximity_data *pd = get_prox_data(dev);
+	unsigned long value;
+	int rc;
+
+	rc = kstrtoul(buf, 10, &value);
+	if (rc < 0 || (value != 0 && value != 1)) {
+		dev_err(dev, "%s: Invalid value\n", __func__);
+		return -EINVAL;
+	}
+
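+	/*
+	 * enable_count acts as a refcount: the first enable starts proximity
+	 * sensing and the last disable stops it.
+	 */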
+	mutex_lock(&pd->sysfs_lock);
+	if (value) {
+		if (pd->enable_count++) {
+			parade_debug(dev,
+				DEBUG_LEVEL_2, "%s: '%s' already enabled\n",
+				__func__, pd->input->name);
+		} else {
+			rc = _cyttsp5_proximity_enable(pd);
+			if (rc)
+				pd->enable_count--;
+		}
+	} else {
+		if (--pd->enable_count) {
+			if (pd->enable_count < 0) {
+				dev_err(dev, "%s: '%s' unbalanced disable\n",
+					__func__, pd->input->name);
+				pd->enable_count = 0;
+			}
+		} else {
+			rc = _cyttsp5_proximity_disable(pd, false);
+			if (rc)
+				pd->enable_count++;
+		}
+	}
+	mutex_unlock(&pd->sysfs_lock);
+
+	if (rc)
+		return rc;
+
+	return size;
+}
+
+static DEVICE_ATTR(prox_enable, 0600,
+		cyttsp5_proximity_enable_show,
+		cyttsp5_proximity_enable_store);
+
+static int cyttsp5_setup_input_device_and_sysfs(struct device *dev)
+{
+	struct cyttsp5_proximity_data *pd = get_prox_data(dev);
+	int signal = CY_IGNORE_VALUE;
+	int i;
+	int rc;
+
+	rc = device_create_file(dev, &dev_attr_prox_enable);
+	if (rc) {
+		dev_err(dev, "%s: Error, could not create enable\n",
+				__func__);
+		goto exit;
+	}
+
+	parade_debug(dev, DEBUG_LEVEL_2, "%s: Initialize event signals\n",
+				__func__);
+
+	__set_bit(EV_ABS, pd->input->evbit);
+
+	/* set event signal capabilities */
+	for (i = 0; i < NUM_SIGNALS(pd->pdata->frmwrk); i++) {
+		signal = PARAM_SIGNAL(pd->pdata->frmwrk, i);
+		if (signal != CY_IGNORE_VALUE)
+			input_set_abs_params(pd->input, signal,
+				PARAM_MIN(pd->pdata->frmwrk, i),
+				PARAM_MAX(pd->pdata->frmwrk, i),
+				PARAM_FUZZ(pd->pdata->frmwrk, i),
+				PARAM_FLAT(pd->pdata->frmwrk, i));
+	}
+
+	rc = input_register_device(pd->input);
+	if (rc) {
+		dev_err(dev, "%s: Error, failed register input device r=%d\n",
+			__func__, rc);
+		goto unregister_enable;
+	}
+
+	pd->input_device_registered = true;
+	return rc;
+
+unregister_enable:
+	device_remove_file(dev, &dev_attr_prox_enable);
+exit:
+	return rc;
+}
+
+static int cyttsp5_setup_input_attention(struct device *dev)
+{
+	struct cyttsp5_proximity_data *pd = get_prox_data(dev);
+	int rc;
+
+	pd->si = _cyttsp5_request_sysinfo(dev);
+	if (!pd->si)
+		return -EINVAL;
+
+	rc = cyttsp5_setup_input_device_and_sysfs(dev);
+	if (!rc)
+		rc = _cyttsp5_set_proximity(pd, false);
+
+	_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_STARTUP,
+		CYTTSP5_PROXIMITY_NAME, cyttsp5_setup_input_attention, 0);
+
+	return rc;
+}
+
+int cyttsp5_proximity_probe(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+	struct cyttsp5_proximity_data *pd = &cd->pd;
+	struct cyttsp5_platform_data *pdata = dev_get_platdata(dev);
+	struct cyttsp5_proximity_platform_data *prox_pdata;
+	int rc = 0;
+
+	if (!pdata || !pdata->prox_pdata) {
+		dev_err(dev, "%s: Missing platform data\n", __func__);
+		rc = -ENODEV;
+		goto error_no_pdata;
+	}
+	prox_pdata = pdata->prox_pdata;
+
+	mutex_init(&pd->prox_lock);
+	mutex_init(&pd->sysfs_lock);
+	pd->dev = dev;
+	pd->pdata = prox_pdata;
+
+	/* Create the input device and register it. */
+	parade_debug(dev, DEBUG_LEVEL_2,
+		"%s: Create the input device and register it\n", __func__);
+	pd->input = input_allocate_device();
+	if (!pd->input) {
+		dev_err(dev, "%s: Error, failed to allocate input device\n",
+			__func__);
+		rc = -ENODEV;
+		goto error_alloc_failed;
+	}
+
+	if (pd->pdata->inp_dev_name)
+		pd->input->name = pd->pdata->inp_dev_name;
+	else
+		pd->input->name = CYTTSP5_PROXIMITY_NAME;
+	scnprintf(pd->phys, sizeof(pd->phys), "%s/input%d", dev_name(dev),
+			cd->phys_num++);
+	pd->input->phys = pd->phys;
+	pd->input->dev.parent = pd->dev;
+	input_set_drvdata(pd->input, pd);
+
+	/* get sysinfo */
+	pd->si = _cyttsp5_request_sysinfo(dev);
+
+	if (pd->si) {
+		rc = cyttsp5_setup_input_device_and_sysfs(dev);
+		if (rc)
+			goto error_init_input;
+
+		rc = _cyttsp5_set_proximity(pd, false);
+	} else {
+		dev_err(dev, "%s: Fail get sysinfo pointer from core p=%pK\n",
+			__func__, pd->si);
+		_cyttsp5_subscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_PROXIMITY_NAME, cyttsp5_setup_input_attention,
+			0);
+	}
+
+	return 0;
+
+error_init_input:
+	input_free_device(pd->input);
+error_alloc_failed:
+error_no_pdata:
+	dev_err(dev, "%s failed.\n", __func__);
+	return rc;
+}
+
+int cyttsp5_proximity_release(struct device *dev)
+{
+	struct cyttsp5_proximity_data *pd = get_prox_data(dev);
+
+	if (pd->input_device_registered) {
+		/* Disable proximity sensing */
+		mutex_lock(&pd->sysfs_lock);
+		if (pd->enable_count)
+			_cyttsp5_proximity_disable(pd, true);
+		mutex_unlock(&pd->sysfs_lock);
+		device_remove_file(dev, &dev_attr_prox_enable);
+		input_unregister_device(pd->input);
+	} else {
+		input_free_device(pd->input);
+		_cyttsp5_unsubscribe_attention(dev, CY_ATTEN_STARTUP,
+			CYTTSP5_PROXIMITY_NAME, cyttsp5_setup_input_attention,
+			0);
+	}
+
+	return 0;
+}
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_regs.h b/drivers/input/touchscreen/cyttsp5/cyttsp5_regs.h
new file mode 100644
index 0000000..933daae
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_regs.h
@@ -0,0 +1,1195 @@
+/*
+ * cyttsp5_regs.h
+ * Parade TrueTouch(TM) Standard Product V5 Registers.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#ifndef _CYTTSP5_REGS_H
+#define _CYTTSP5_REGS_H
+
+#include <linux/device.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#elif defined(CONFIG_FB)
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#endif
+#include <asm/unaligned.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/version.h>
+#include <linux/input/cyttsp5_core.h>
+
+#include <linux/timer.h>
+#include <linux/timex.h>
+#include <linux/rtc.h>
+
+
+/* #define EASYWAKE_TSG6 */
+
+#define CY_FW_FILE_PREFIX            "cyttsp5_fw"
+#define CY_FW_FILE_SUFFIX            ".bin"
+#define CY_FW_FILE_NAME              "cyttsp5_fw.bin"
+#define TTHE_TUNER_SUPPORT
+
+#ifdef TTHE_TUNER_SUPPORT
+#define CYTTSP5_TTHE_TUNER_FILE_NAME "tthe_tuner"
+#endif
+
+#define CY_MAX_PRBUF_SIZE            PIPE_BUF
+#define CY_PR_TRUNCATED              " truncated..."
+
+#define CY_DEFAULT_CORE_ID           "cyttsp5_core0"
+#define CY_MAX_NUM_CORE_DEVS         5
+#define CY_IRQ_ASSERTED_VALUE        0
+
+
+#ifdef CY_ENABLE_MAX_ELEN
+#define CY_MAX_ELEN 100
+#endif
+
+enum PARADE_DEBUG_LEVEL {
+	DEBUG_LEVEL_0,/*no debug info*/
+	DEBUG_LEVEL_1,/*only print dev_dbg info*/
+	DEBUG_LEVEL_2,/*print dev_dbg and dev_vdbg info*/
+	DEBUG_LEVEL_NUM
+};
+
+#define CY_INITIAL_DEBUG_LEVEL 0
+#define CY_INITIAL_SHOW_TIME_STAMP 0
+
+/*
+ * Messages whose debug level is at or below the configured debug level
+ * are printed; higher-level messages are suppressed.
+ */
+
+#define parade_debug(dev, dlevel, format, arg...) \
+	do { \
+		struct cyttsp5_core_data *cd_tmp = dev_get_drvdata(dev);\
+		if (cd_tmp->debug_level >= dlevel) {\
+			dev_dbg(dev, format, ##arg);\
+		} \
+	} while (0)
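+
+/*
+ * Example (a usage sketch): parade_debug(dev, DEBUG_LEVEL_1, "%s: enter\n",
+ * __func__) prints only when the configured debug_level is at least
+ * DEBUG_LEVEL_1.
+ */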
+
+
+/* HID */
+#define HID_CYVENDOR                   0xff010000
+#define CY_HID_VENDOR_ID               0x04B4
+#define CY_HID_BL_PRODUCT_ID           0xC100
+#define CY_HID_APP_PRODUCT_ID          0xC101
+#define CY_HID_VERSION                 0x0100
+#define CY_HID_APP_REPORT_ID           0xF7
+#define CY_HID_BL_REPORT_ID            0xFF
+
+#define HID_INVALID_REPORT_ID          0x0
+#define HID_TOUCH_REPORT_ID            0x1
+#define HID_BTN_REPORT_ID              0x3
+#define HID_WAKEUP_REPORT_ID           0x4
+#define HID_NOISE_METRIC_REPORT_ID     0x5
+#define HID_TRACKING_HEATMAP_REPOR_ID  0xE
+#define HID_SENSOR_DATA_REPORT_ID      0xF
+#define HID_APP_RESPONSE_REPORT_ID     0x1F
+#define HID_APP_OUTPUT_REPORT_ID       0x2F
+#define HID_BL_RESPONSE_REPORT_ID      0x30
+#define HID_BL_OUTPUT_REPORT_ID        0x40
+#define HID_RESPONSE_REPORT_ID         0xF0
+
+#define HID_OUTPUT_RESPONSE_REPORT_OFFSET  2
+#define HID_OUTPUT_RESPONSE_CMD_OFFSET 4
+#define HID_OUTPUT_RESPONSE_CMD_MASK   0x7F
+#define HID_OUTPUT_CMD_OFFSET          6
+#define HID_OUTPUT_CMD_MASK            0x7F
+
+#define HID_SYSINFO_CYDATA_OFFSET      5
+#define HID_SYSINFO_SENSING_OFFSET     33
+#define HID_SYSINFO_BTN_OFFSET         48
+#define HID_SYSINFO_BTN_MASK           0xFF
+#define HID_SYSINFO_MAX_BTN            8
+
+#define HID_POWER_ON                   0x0
+#define HID_POWER_SLEEP                0x1
+#define HID_LENGTH_BYTES               2
+#define HID_LENGTH_AND_REPORT_ID_BYTES 3
+
+/*  Timeout in ms */
+#define CY_REQUEST_EXCLUSIVE_TIMEOUT   8000
+#define CY_WATCHDOG_TIMEOUT            1000
+#define CY_HID_RESET_TIMEOUT           1000
+#define CY_HID_AUTO_CALI_CPLT_TIMEOUT  2500
+/* HID_DESCRIPTOR_TIMEOUT value based on FW spec (CAL_OS) */
+#define CY_HID_GET_HID_DESCRIPTOR_TIMEOUT     1000
+#define CY_HID_GET_REPORT_DESCRIPTOR_TIMEOUT  500
+#define CY_HID_SET_POWER_TIMEOUT              500
+#ifdef VERBOSE_DEBUG
+#define CY_HID_OUTPUT_TIMEOUT          2000
+#else
+#define CY_HID_OUTPUT_TIMEOUT          200
+#endif
+#define CY_HID_OUTPUT_START_BOOTLOADER_TIMEOUT 2000
+#define CY_HID_OUTPUT_USER_TIMEOUT             8000
+#define CY_HID_OUTPUT_GET_SYSINFO_TIMEOUT      3000
+#define CY_HID_OUTPUT_CALIBRATE_IDAC_TIMEOUT   5000
+#define CY_HID_OUTPUT_WRITE_CONF_BLOCK_TIMEOUT 400
+#define CY_HID_OUTPUT_RUN_SELF_TEST_TIMEOUT    10000
+#define CY_HID_OUTPUT_BL_INITIATE_BL_TIMEOUT   20000
+#define CY_HID_OUTPUT_BL_PROGRAM_AND_VERIFY_TIMEOUT 400
+
+#define CY_WATCHDOG_RETRY_COUNT                60
+
+/* maximum number of concurrent tracks */
+#define TOUCH_REPORT_SIZE          10
+#define TOUCH_INPUT_HEADER_SIZE    7
+#define TOUCH_COUNT_BYTE_OFFSET    5
+#define BTN_REPORT_SIZE            9
+#define BTN_INPUT_HEADER_SIZE      5
+#define SENSOR_REPORT_SIZE         150
+#define SENSOR_HEADER_SIZE         4
+
+/* helpers */
+#define GET_NUM_TOUCHES(x)         ((x) & 0x1F)
+#define IS_LARGE_AREA(x)           ((x) & 0x20)
+#define IS_BAD_PKT(x)              ((x) & 0x20)
+#define IS_TMO(t)                  ((t) == 0)
+#define HI_BYTE(x)                 (u8)(((x) >> 8) & 0xFF)
+#define LOW_BYTE(x)                (u8)((x) & 0xFF)
+#define SET_CMD_LOW(byte, bits) \
+	((byte) = (((byte) & 0xF0) | ((bits) & 0x0F)))
+#define SET_CMD_HIGH(byte, bits) \
+	((byte) = (((byte) & 0x0F) | ((bits) & 0xF0)))
+
+#define GET_MASK(length) \
+	((1 << (length)) - 1)
+#define GET_FIELD(name, length, shift) \
+	(((name) >> (shift)) & GET_MASK(length))
+
+#define HID_ITEM_SIZE_MASK         0x03
+#define HID_ITEM_TYPE_MASK         0x0C
+#define HID_ITEM_TAG_MASK          0xF0
+
+#define HID_ITEM_SIZE_SHIFT        0
+#define HID_ITEM_TYPE_SHIFT        2
+#define HID_ITEM_TAG_SHIFT         4
+
+#define HID_GET_ITEM_SIZE(x)  \
+	(((x) & HID_ITEM_SIZE_MASK) >> HID_ITEM_SIZE_SHIFT)
+#define HID_GET_ITEM_TYPE(x) \
+	(((x) & HID_ITEM_TYPE_MASK) >> HID_ITEM_TYPE_SHIFT)
+#define HID_GET_ITEM_TAG(x) \
+	(((x) & HID_ITEM_TAG_MASK) >> HID_ITEM_TAG_SHIFT)
+
+#define IS_DEEP_SLEEP_CONFIGURED(x) \
+		((x) == 0 || (x) == 0xFF)
+
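+/*
+ * True when the PIP version reported in sysinfo is at least maj.min;
+ * the minor version only matters when the major versions are equal.
+ */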
+#define IS_PIP_VER_GE(p, maj, min) \
+		((p)->cydata.pip_ver_major > (maj) ? \
+			1 : \
+			((p)->cydata.pip_ver_major < (maj) ? \
+				0 : \
+				((p)->cydata.pip_ver_minor >= (min) ? 1 : 0)))
+
+/* drv_debug commands */
+#define CY_DBG_SUSPEND                  4
+#define CY_DBG_RESUME                   5
+#define CY_DBG_SOFT_RESET               97
+#define CY_DBG_RESET                    98
+#define CY_DBG_HID_RESET                50
+#define CY_DBG_HID_GET_REPORT           51
+#define CY_DBG_HID_SET_REPORT           52
+#define CY_DBG_HID_SET_POWER_ON         53
+#define CY_DBG_HID_SET_POWER_SLEEP      54
+#define CY_DBG_HID_NULL                 100
+#define CY_DBG_HID_ENTER_BL             101
+#define CY_DBG_HID_SYSINFO              102
+#define CY_DBG_HID_SUSPEND_SCAN         103
+#define CY_DBG_HID_RESUME_SCAN          104
+#define CY_DBG_HID_STOP_WD              105
+#define CY_DBG_HID_START_WD             106
+
+#define CY_TTHE_TUNER_EXIT              107
+#define CY_TTHE_BUF_CLEAN               108
+
+/*
+ * Commands that require additional parameters
+ * will be in the 200 range. Commands that do not
+ * require additional parameters remain below 200.
+ */
+#define CY_DBG_REPORT_LEVEL             200
+#define CY_DBG_WATCHDOG_INTERVAL        201
+#define CY_DBG_SHOW_TIMESTAMP           202
+
+/* Recognized usages */
+/* undef them first for possible redefinition in Linux */
+#undef HID_DI_PRESSURE
+#undef HID_DI_TIP
+#undef HID_DI_CONTACTID
+#undef HID_DI_CONTACTCOUNT
+#undef HID_DI_SCANTIME
+#define HID_DI_PRESSURE         0x000d0030
+#define HID_DI_TIP              0x000d0042
+#define HID_DI_CONTACTID        0x000d0051
+#define HID_DI_CONTACTCOUNT     0x000d0054
+#define HID_DI_SCANTIME         0x000d0056
+
+/* Parade vendor specific usages */
+#define HID_CY_UNDEFINED        0xff010000
+#define HID_CY_BOOTLOADER       0xff010001
+#define HID_CY_TOUCHAPPLICATION 0xff010002
+#define HID_CY_BUTTONS          0xff010020
+#define HID_CY_GENERICITEM      0xff010030
+#define HID_CY_LARGEOBJECT      0xff010040
+#define HID_CY_NOISEEFFECTS     0xff010041
+#define HID_CY_REPORTCOUNTER    0xff010042
+#define HID_CY_TOUCHTYPE        0xff010060
+#define HID_CY_EVENTID          0xff010061
+#define HID_CY_MAJORAXISLENGTH  0xff010062
+#define HID_CY_MINORAXISLENGTH  0xff010063
+#define HID_CY_ORIENTATION      0xff010064
+#define HID_CY_BUTTONSIGNAL     0xff010065
+#define HID_CY_MAJOR_CONTACT_AXIS_LENGTH 0xff010066
+#define HID_CY_MINOR_CONTACT_AXIS_LENGTH 0xff010067
+#define HID_CY_TCH_COL_USAGE_PG 0x000D0022
+#define HID_CY_BTN_COL_USAGE_PG 0xFF010020
+
+#define PANEL_ID_NOT_ENABLED    0xFF
+
+#define GESTURE_DOUBLE_TAP         (1)
+#define GESTURE_TWO_FINGERS_SLIDE  (2)
+#define GESTURE_TOUCH_DETECTED     (3)
+#define GESTURE_PUSH_BUTTON        (4)
+#define GESTURE_SINGLE_SLIDE_DE_TX (5)
+#define GESTURE_SINGLE_SLIDE_IN_TX (6)
+#define GESTURE_SINGLE_SLIDE_DE_RX (7)
+#define GESTURE_SINGLE_SLIDE_IN_RX (8)
+#define GESTURE_SINGLE_MULTI_TOUCH (16)
+#define GESTURE_SINGLE_QUICK_CALL  (99)
+
+/* FW RAM parameters */
+#define CY_RAM_ID_TOUCHMODE_ENABLED      0x02
+#define CY_RAM_ID_PROXIMITY_ENABLE       0x20
+#define CY_RAM_ID_TOUCHMODE_ENABLED_SIZE 1
+#define CY_RAM_ID_PROXIMITY_ENABLE_SIZE  1
+
+/* abs signal capabilities offsets in the frameworks array */
+enum cyttsp5_sig_caps {
+	CY_SIGNAL_OST,
+	CY_MIN_OST,
+	CY_MAX_OST,
+	CY_FUZZ_OST,
+	CY_FLAT_OST,
+	CY_NUM_ABS_SET /* number of signal capability fields */
+};
+
+/* helpers */
+#define NUM_SIGNALS(frmwrk) ((frmwrk)->size / CY_NUM_ABS_SET)
+#define PARAM(frmwrk, sig_ost, cap_ost) \
+		((frmwrk)->abs[((sig_ost) * CY_NUM_ABS_SET) + (cap_ost)])
+
+#define PARAM_SIGNAL(frmwrk, sig_ost) PARAM(frmwrk, sig_ost, CY_SIGNAL_OST)
+#define PARAM_MIN(frmwrk, sig_ost)    PARAM(frmwrk, sig_ost, CY_MIN_OST)
+#define PARAM_MAX(frmwrk, sig_ost)    PARAM(frmwrk, sig_ost, CY_MAX_OST)
+#define PARAM_FUZZ(frmwrk, sig_ost)   PARAM(frmwrk, sig_ost, CY_FUZZ_OST)
+#define PARAM_FLAT(frmwrk, sig_ost)   PARAM(frmwrk, sig_ost, CY_FLAT_OST)
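+/*
+ * Layout example: the framework abs[] array holds CY_NUM_ABS_SET entries
+ * per signal, ordered { signal, min, max, fuzz, flat }, so
+ * PARAM_MAX(frmwrk, CY_ABS_Y_OST) reads abs[1 * CY_NUM_ABS_SET + 2].
+ */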
+
+/* abs axis signal offsets in the frameworks array */
+enum cyttsp5_sig_ost {
+	CY_ABS_X_OST,
+	CY_ABS_Y_OST,
+	CY_ABS_P_OST,
+	CY_ABS_W_OST,
+	CY_ABS_ID_OST,
+	CY_ABS_MAJ_OST,
+	CY_ABS_MIN_OST,
+	CY_ABS_OR_OST,
+	CY_ABS_TOOL_OST,
+	CY_ABS_D_OST,
+	CY_NUM_ABS_OST /* number of abs signals */
+};
+
+enum hid_command {
+	HID_CMD_RESERVED,
+	HID_CMD_RESET,
+	HID_CMD_GET_REPORT,
+	HID_CMD_SET_REPORT,
+	HID_CMD_GET_IDLE,
+	HID_CMD_SET_IDLE,
+	HID_CMD_GET_PROTOCOL,
+	HID_CMD_SET_PROTOCOL,
+	HID_CMD_SET_POWER,
+	HID_CMD_VENDOR = 0xE,
+};
+
+enum hid_output_cmd_type {
+	HID_OUTPUT_CMD_APP,
+	HID_OUTPUT_CMD_BL,
+};
+
+enum hid_output {
+	HID_OUTPUT_NULL,
+	HID_OUTPUT_START_BOOTLOADER,
+	HID_OUTPUT_GET_SYSINFO,
+	HID_OUTPUT_SUSPEND_SCANNING,
+	HID_OUTPUT_RESUME_SCANNING,
+	HID_OUTPUT_GET_PARAM,
+	HID_OUTPUT_SET_PARAM,
+	HID_OUTPUT_GET_NOISE_METRICS,
+	HID_OUTPUT_RESERVED,
+	HID_OUTPUT_ENTER_EASYWAKE_STATE,
+	HID_OUTPUT_EXIT_EASYWAKE_STATE = 0x10, /* CY_GES_WAKEUP */
+	HID_OUTPUT_VERIFY_CONFIG_BLOCK_CRC = 0x20,
+	HID_OUTPUT_GET_CONFIG_ROW_SIZE,
+	HID_OUTPUT_READ_CONF_BLOCK,
+	HID_OUTPUT_WRITE_CONF_BLOCK,
+	HID_OUTPUT_GET_DATA_STRUCTURE,
+	HID_OUTPUT_LOAD_SELF_TEST_PARAM,
+	HID_OUTPUT_RUN_SELF_TEST,
+	HID_OUTPUT_GET_SELF_TEST_RESULT,
+	HID_OUTPUT_CALIBRATE_IDACS,
+	HID_OUTPUT_INITIALIZE_BASELINES,
+	HID_OUTPUT_EXEC_PANEL_SCAN,
+	HID_OUTPUT_RETRIEVE_PANEL_SCAN,
+	HID_OUTPUT_START_SENSOR_DATA_MODE,
+	HID_OUTPUT_STOP_SENSOR_DATA_MODE,
+	HID_OUTPUT_START_TRACKING_HEATMAP_MODE,
+	HID_OUTPUT_INT_PIN_OVERRIDE = 0x40,
+	HID_OUTPUT_STORE_PANEL_SCAN = 0x60,
+	HID_OUTPUT_PROCESS_PANEL_SCAN,
+	HID_OUTPUT_DISCARD_INPUT_REPORT,
+	HID_OUTPUT_LAST,
+	HID_OUTPUT_USER_CMD,
+};
+
+enum hid_output_bl {
+	HID_OUTPUT_BL_VERIFY_APP_INTEGRITY = 0x31,
+	HID_OUTPUT_BL_GET_INFO = 0x38,
+	HID_OUTPUT_BL_PROGRAM_AND_VERIFY,
+	HID_OUTPUT_BL_LAUNCH_APP = 0x3B,
+	HID_OUTPUT_BL_GET_PANEL_ID = 0x3E,
+	HID_OUTPUT_BL_INITIATE_BL = 0x48,
+	HID_OUTPUT_BL_LAST,
+};
+
+#define HID_OUTPUT_BL_SOP          0x1
+#define HID_OUTPUT_BL_EOP          0x17
+
+enum hid_output_bl_status {
+	ERROR_SUCCESS,
+	ERROR_KEY,
+	ERROR_VERIFICATION,
+	ERROR_LENGTH,
+	ERROR_DATA,
+	ERROR_COMMAND,
+	ERROR_CRC = 8,
+	ERROR_FLASH_ARRAY,
+	ERROR_FLASH_ROW,
+	ERROR_FLASH_PROTECTION,
+	ERROR_UKNOWN = 15,
+	ERROR_INVALID,
+};
+
+enum cyttsp5_mode {
+	CY_MODE_UNKNOWN,
+	CY_MODE_BOOTLOADER,
+	CY_MODE_OPERATIONAL,
+};
+
+enum cyttsp5_cmd_status {
+	CY_CMD_STATUS_SUCCESS,
+	CY_CMD_STATUS_FAILURE,
+};
+
+enum {
+	CY_IC_GRPNUM_RESERVED,
+	CY_IC_GRPNUM_CMD_REGS,
+	CY_IC_GRPNUM_TCH_REP,
+	CY_IC_GRPNUM_DATA_REC,
+	CY_IC_GRPNUM_TEST_REC,
+	CY_IC_GRPNUM_PCFG_REC,
+	CY_IC_GRPNUM_TCH_PARM_VAL,
+	CY_IC_GRPNUM_TCH_PARM_SIZE,
+	CY_IC_GRPNUM_RESERVED1,
+	CY_IC_GRPNUM_RESERVED2,
+	CY_IC_GRPNUM_OPCFG_REC,
+	CY_IC_GRPNUM_DDATA_REC,
+	CY_IC_GRPNUM_MDATA_REC,
+	CY_IC_GRPNUM_TEST_REGS,
+	CY_IC_GRPNUM_BTN_KEYS,
+	CY_IC_GRPNUM_TTHE_REGS,
+	CY_IC_GRPNUM_SENSING_CONF,
+	CY_IC_GRPNUM_NUM,
+};
+
+enum cyttsp5_event_id {
+	CY_EV_NO_EVENT,
+	CY_EV_TOUCHDOWN,
+	CY_EV_MOVE,             /* significant displacement (> act dist) */
+	CY_EV_LIFTOFF,          /* record reports last position */
+};
+
+enum cyttsp5_object_id {
+	CY_OBJ_STANDARD_FINGER,
+	CY_OBJ_PROXIMITY,
+	CY_OBJ_STYLUS,
+	CY_OBJ_HOVER,
+	CY_OBJ_GLOVE,
+};
+
+enum cyttsp5_self_test_result {
+	CY_ST_RESULT_PASS,
+	CY_ST_RESULT_FAIL,
+	CY_ST_RESULT_HOST_MUST_INTERPRET = 0xFF,
+};
+
+enum cyttsp5_self_test_id {
+	CY_ST_ID_NULL,
+	CY_ST_ID_BIST,
+	CY_ST_ID_SHORTS,
+	CY_ST_ID_OPENS,
+	CY_ST_ID_AUTOSHORTS,
+	CY_ST_ID_CM_PANEL,
+	CY_ST_ID_CP_PANEL,
+	CY_ST_ID_CM_BUTTON,
+	CY_ST_ID_CP_BUTTON,
+};
+
+#define CY_NUM_MFGID               8
+
+/* System Information interface definitions */
+struct cyttsp5_cydata_dev {
+	u8 pip_ver_major;
+	u8 pip_ver_minor;
+	__le16 fw_pid;
+	u8 fw_ver_major;
+	u8 fw_ver_minor;
+	__le32 revctrl;
+	__le16 fw_ver_conf;
+	u8 bl_ver_major;
+	u8 bl_ver_minor;
+	__le16 jtag_si_id_l;
+	__le16 jtag_si_id_h;
+	u8 mfg_id[CY_NUM_MFGID];
+	__le16 post_code;
+} __packed;
+
+struct cyttsp5_sensing_conf_data_dev {
+	u8 electrodes_x;
+	u8 electrodes_y;
+	__le16 len_x;
+	__le16 len_y;
+	__le16 res_x;
+	__le16 res_y;
+	__le16 max_z;
+	u8 origin_x;
+	u8 origin_y;
+	u8 panel_id;
+	u8 btn;
+	u8 scan_mode;
+	u8 max_num_of_tch_per_refresh_cycle;
+} __packed;
+
+struct cyttsp5_cydata {
+	u8 pip_ver_major;
+	u8 pip_ver_minor;
+	u8 bl_ver_major;
+	u8 bl_ver_minor;
+	u8 fw_ver_major;
+	u8 fw_ver_minor;
+	u16 fw_pid;
+	u16 fw_ver_conf;
+	u16 post_code;
+	u32 revctrl;
+	u16 jtag_id_l;
+	u16 jtag_id_h;
+	u8 mfg_id[CY_NUM_MFGID];
+};
+
+struct cyttsp5_sensing_conf_data {
+	u16 res_x;
+	u16 res_y;
+	u16 max_z;
+	u16 len_x;
+	u16 len_y;
+	u8 electrodes_x;
+	u8 electrodes_y;
+	u8 origin_x;
+	u8 origin_y;
+	u8 panel_id;
+	u8 btn;
+	u8 scan_mode;
+	u8 max_tch;
+	u8 rx_num;
+	u8 tx_num;
+};
+
+enum cyttsp5_tch_abs {  /* for ordering within the extracted touch data array */
+	CY_TCH_X,       /* X */
+	CY_TCH_Y,       /* Y */
+	CY_TCH_P,       /* P (Z) */
+	CY_TCH_T,       /* TOUCH ID */
+	CY_TCH_E,       /* EVENT ID */
+	CY_TCH_O,       /* OBJECT ID */
+	CY_TCH_TIP,     /* TIP */
+	CY_TCH_MAJ,     /* TOUCH_MAJOR */
+	CY_TCH_MIN,     /* TOUCH_MINOR */
+	CY_TCH_OR,      /* ORIENTATION */
+	CY_TCH_NUM_ABS,
+};
+
+enum cyttsp5_tch_hdr {
+	CY_TCH_TIME,    /* SCAN TIME */
+	CY_TCH_NUM,     /* NUMBER OF RECORDS */
+	CY_TCH_LO,      /* LARGE OBJECT */
+	CY_TCH_NOISE,   /* NOISE EFFECT */
+	CY_TCH_COUNTER, /* REPORT_COUNTER */
+	CY_TCH_NUM_HDR,
+};
+
+static const char * const cyttsp5_tch_abs_string[] = {
+	[CY_TCH_X]       = "X",
+	[CY_TCH_Y]       = "Y",
+	[CY_TCH_P]       = "P",
+	[CY_TCH_T]       = "T",
+	[CY_TCH_E]       = "E",
+	[CY_TCH_O]       = "O",
+	[CY_TCH_TIP]     = "TIP",
+	[CY_TCH_MAJ]     = "MAJ",
+	[CY_TCH_MIN]     = "MIN",
+	[CY_TCH_OR]      = "OR",
+	[CY_TCH_NUM_ABS] = "INVALID",
+};
+
+static const char * const cyttsp5_tch_hdr_string[] = {
+	[CY_TCH_TIME]    = "SCAN TIME",
+	[CY_TCH_NUM]     = "NUMBER OF RECORDS",
+	[CY_TCH_LO]      = "LARGE OBJECT",
+	[CY_TCH_NOISE]   = "NOISE EFFECT",
+	[CY_TCH_COUNTER] = "REPORT_COUNTER",
+	[CY_TCH_NUM_HDR] = "INVALID",
+};
+
+static const int cyttsp5_tch_abs_field_map[] = {
+	[CY_TCH_X]       = 0x00010030 /* HID_GD_X */,
+	[CY_TCH_Y]       = 0x00010031 /* HID_GD_Y */,
+	[CY_TCH_P]       = HID_DI_PRESSURE,
+	[CY_TCH_T]       = HID_DI_CONTACTID,
+	[CY_TCH_E]       = HID_CY_EVENTID,
+	[CY_TCH_O]       = HID_CY_TOUCHTYPE,
+	[CY_TCH_TIP]     = HID_DI_TIP,
+	[CY_TCH_MAJ]     = HID_CY_MAJORAXISLENGTH,
+	[CY_TCH_MIN]     = HID_CY_MINORAXISLENGTH,
+	[CY_TCH_OR]      = HID_CY_ORIENTATION,
+	[CY_TCH_NUM_ABS] = 0,
+};
+
+static const int cyttsp5_tch_hdr_field_map[] = {
+	[CY_TCH_TIME]    = HID_DI_SCANTIME,
+	[CY_TCH_NUM]     = HID_DI_CONTACTCOUNT,
+	[CY_TCH_LO]      = HID_CY_LARGEOBJECT,
+	[CY_TCH_NOISE]   = HID_CY_NOISEEFFECTS,
+	[CY_TCH_COUNTER] = HID_CY_REPORTCOUNTER,
+	[CY_TCH_NUM_HDR] = 0,
+};
+
+#define CY_NUM_EXT_TCH_FIELDS   3
+
+struct cyttsp5_tch_abs_params {
+	size_t ofs;     /* abs byte offset */
+	size_t size;    /* size in bits */
+	size_t min;     /* min value */
+	size_t max;     /* max value */
+	size_t bofs;    /* bit offset */
+	u8 report;
+};
+
+struct cyttsp5_touch {
+	int hdr[CY_TCH_NUM_HDR];
+	int abs[CY_TCH_NUM_ABS];
+};
+
+/* button to keycode support */
+#define CY_BITS_PER_BTN         1
+#define CY_NUM_BTN_EVENT_ID     ((1 << CY_BITS_PER_BTN) - 1)
+
+enum cyttsp5_btn_state {
+	CY_BTN_RELEASED = 0,
+	CY_BTN_PRESSED = 1,
+	CY_BTN_NUM_STATE
+};
+
+struct cyttsp5_btn {
+	bool enabled;
+	int state; /* CY_BTN_PRESSED, CY_BTN_RELEASED */
+	int key_code;
+};
+
+enum cyttsp5_ic_ebid {
+	CY_TCH_PARM_EBID,
+	CY_MDATA_EBID,
+	CY_DDATA_EBID,
+};
+
+/* ttconfig block */
+#define CY_TTCONFIG_VERSION_OFFSET      8
+#define CY_TTCONFIG_VERSION_SIZE        2
+#define CY_TTCONFIG_VERSION_ROW         0
+
+struct cyttsp5_ttconfig {
+	u16 version;
+	u16 crc;
+};
+
+struct cyttsp5_report_desc_data {
+	u16 tch_report_id;
+	u16 tch_record_size;
+	u16 tch_header_size;
+	u16 btn_report_id;
+};
+
+struct cyttsp5_sysinfo {
+	bool ready;
+	struct cyttsp5_cydata cydata;
+	struct cyttsp5_sensing_conf_data sensing_conf_data;
+	struct cyttsp5_report_desc_data desc;
+	int num_btns;
+	struct cyttsp5_btn *btn;
+	struct cyttsp5_ttconfig ttconfig;
+	struct cyttsp5_tch_abs_params tch_hdr[CY_TCH_NUM_HDR];
+	struct cyttsp5_tch_abs_params tch_abs[CY_TCH_NUM_ABS];
+	u8 *xy_mode;
+	u8 *xy_data;
+};
+
+enum cyttsp5_atten_type {
+	CY_ATTEN_IRQ,
+	CY_ATTEN_STARTUP,
+	CY_ATTEN_EXCLUSIVE,
+	CY_ATTEN_WAKE,
+	CY_ATTEN_LOADER,
+	CY_ATTEN_SUSPEND,
+	CY_ATTEN_RESUME,
+	CY_ATTEN_NUM_ATTEN,
+};
+
+enum cyttsp5_sleep_state {
+	SS_SLEEP_OFF,
+	SS_SLEEP_ON,
+	SS_SLEEPING,
+	SS_WAKING,
+};
+
+enum cyttsp5_fb_state {
+	FB_ON,
+	FB_OFF,
+};
+
+enum cyttsp5_startup_state {
+	STARTUP_NONE,
+	STARTUP_QUEUED,
+	STARTUP_RUNNING,
+	STARTUP_ILLEGAL,
+};
+
+struct cyttsp5_hid_desc {
+	__le16 hid_desc_len;
+	u8 packet_id;
+	u8 reserved_byte;
+	__le16 bcd_version;
+	__le16 report_desc_len;
+	__le16 report_desc_register;
+	__le16 input_register;
+	__le16 max_input_len;
+	__le16 output_register;
+	__le16 max_output_len;
+	__le16 command_register;
+	__le16 data_register;
+	__le16 vendor_id;
+	__le16 product_id;
+	__le16 version_id;
+	u8 reserved[4];
+} __packed;
+
+struct cyttsp5_hid_core {
+	u16 hid_vendor_id;
+	u16 hid_product_id;
+	__le16 hid_desc_register;
+	u16 hid_report_desc_len;
+	u16 hid_max_input_len;
+	u16 hid_max_output_len;
+};
+
+#define CY_HID_MAX_REPORTS             8
+#define CY_HID_MAX_FIELDS              128
+#define CY_HID_MAX_COLLECTIONS         3
+#define CY_HID_MAX_NESTED_COLLECTIONS  CY_HID_MAX_COLLECTIONS
+
+#define CY_MAX_INPUT                   512
+#define CY_PIP_1P7_EMPTY_BUF           0xFF00
+
+enum cyttsp5_module_id {
+	CY_MODULE_MT,
+	CY_MODULE_BTN,
+	CY_MODULE_PROX,
+	CY_MODULE_LAST,
+};
+
+struct cyttsp5_mt_data;
+struct cyttsp5_mt_function {
+	int (*mt_release)(struct device *dev);
+	int (*mt_probe)(struct device *dev, struct cyttsp5_mt_data *md);
+	void (*report_slot_liftoff)(struct cyttsp5_mt_data *md, int max_slots);
+	void (*input_sync)(struct input_dev *input);
+	void (*input_report)(struct input_dev *input, int sig, int t, int type);
+	void (*final_sync)(struct input_dev *input, int max_slots,
+			int mt_sync_count, unsigned long *ids);
+	int (*input_register_device)(struct input_dev *input, int max_slots);
+};
+
+struct cyttsp5_mt_data {
+	struct device *dev;
+	struct cyttsp5_mt_platform_data *pdata;
+	struct cyttsp5_sysinfo *si;
+	struct input_dev *input;
+	struct cyttsp5_mt_function mt_function;
+	struct mutex mt_lock;
+	bool is_suspended;
+	bool input_device_registered;
+	char phys[NAME_MAX];
+	int num_prv_rec;
+	int or_min;
+	int or_max;
+	int t_min;
+	int t_max;
+};
+
+struct cyttsp5_btn_data {
+	struct device *dev;
+	struct cyttsp5_btn_platform_data *pdata;
+	struct cyttsp5_sysinfo *si;
+	struct input_dev *input;
+	struct mutex btn_lock;
+	bool is_suspended;
+	bool input_device_registered;
+	char phys[NAME_MAX];
+};
+
+struct cyttsp5_proximity_data {
+	struct device *dev;
+	struct cyttsp5_proximity_platform_data *pdata;
+	struct cyttsp5_sysinfo *si;
+	struct input_dev *input;
+	struct mutex prox_lock;
+	struct mutex sysfs_lock;
+	int enable_count;
+	bool input_device_registered;
+	char phys[NAME_MAX];
+};
+
+enum cyttsp5_calibrate_idacs_sensing_mode {
+	CY_CI_SM_MUTCAP_FINE,
+	CY_CI_SM_MUTCAP_BUTTON,
+	CY_CI_SM_SELFCAP,
+};
+
+enum cyttsp5_initialize_baselines_sensing_mode {
+	CY_IB_SM_MUTCAP = 1,
+	CY_IB_SM_BUTTON = 2,
+	CY_IB_SM_SELFCAP = 4,
+	CY_IB_SM_BALANCED = 8,
+};
+
+struct cyttsp5_core_nonhid_cmd {
+	int (*start_bl)(struct device *dev, int protect);
+	int (*suspend_scanning)(struct device *dev, int protect);
+	int (*resume_scanning)(struct device *dev, int protect);
+	int (*get_param)(struct device *dev, int protect, u8 param_id,
+			u32 *value);
+	int (*set_param)(struct device *dev, int protect, u8 param_id,
+			u32 value, u8 size);
+	int (*verify_config_block_crc)(struct device *dev, int protect,
+			u8 ebid, u8 *status, u16 *calculated_crc,
+			u16 *stored_crc);
+	int (*get_config_row_size)(struct device *dev, int protect,
+			u16 *row_size);
+	int (*get_data_structure)(struct device *dev, int protect,
+			u16 read_offset, u16 read_length, u8 data_id,
+			u8 *status, u8 *data_format, u16 *actual_read_len,
+			u8 *data);
+	int (*run_selftest)(struct device *dev, int protect, u8 test_id,
+		u8 write_idacs_to_flash, u8 *status, u8 *summary_result,
+		u8 *results_available);
+	int (*get_selftest_result)(struct device *dev, int protect,
+		u16 read_offset, u16 read_length, u8 test_id, u8 *status,
+		u16 *actual_read_len, u8 *data);
+	int (*calibrate_idacs)(struct device *dev, int protect, u8 mode,
+			u8 *status);
+	int (*initialize_baselines)(struct device *dev, int protect,
+			u8 test_id, u8 *status);
+	int (*exec_panel_scan)(struct device *dev, int protect);
+	int (*retrieve_panel_scan)(struct device *dev, int protect,
+			u16 read_offset, u16 read_count, u8 data_id,
+			u8 *response, u8 *config, u16 *actual_read_len,
+			u8 *read_buf);
+	int (*write_conf_block)(struct device *dev, int protect,
+			u16 row_number, u16 write_length, u8 ebid,
+			u8 *write_buf, u8 *security_key, u16 *actual_write_len);
+	int (*user_cmd)(struct device *dev, int protect, u16 read_len,
+			u8 *read_buf, u16 write_len, u8 *write_buf,
+			u16 *actual_read_len);
+	int (*get_bl_info)(struct device *dev, int protect, u8 *return_data);
+	int (*initiate_bl)(struct device *dev, int protect, u16 key_size,
+			u8 *key_buf, u16 row_size, u8 *metadata_row_buf);
+	int (*launch_app)(struct device *dev, int protect);
+	int (*prog_and_verify)(struct device *dev, int protect, u16 data_len,
+			u8 *data_buf);
+	int (*verify_app_integrity)(struct device *dev, int protect,
+			u8 *result);
+	int (*get_panel_id)(struct device *dev, int protect, u8 *panel_id);
+};
+
+typedef int (*cyttsp5_atten_func) (struct device *);
+
+struct cyttsp5_core_commands {
+	int (*subscribe_attention)(struct device *dev,
+			enum cyttsp5_atten_type type, char *id,
+			cyttsp5_atten_func func, int flags);
+	int (*unsubscribe_attention)(struct device *dev,
+			enum cyttsp5_atten_type type, char *id,
+			cyttsp5_atten_func func, int flags);
+	int (*request_exclusive)(struct device *dev, int timeout_ms);
+	int (*release_exclusive)(struct device *dev);
+	int (*request_reset)(struct device *dev);
+	int (*request_restart)(struct device *dev, bool wait);
+	struct cyttsp5_sysinfo * (*request_sysinfo)(struct device *dev);
+	struct cyttsp5_loader_platform_data
+		*(*request_loader_pdata)(struct device *dev);
+	int (*request_stop_wd)(struct device *dev);
+	int (*request_start_wd)(struct device *dev);
+	int (*request_get_hid_desc)(struct device *dev, int protect);
+	int (*request_get_mode)(struct device *dev, int protect, u8 *mode);
+	int (*request_enable_scan_type)(struct device *dev, u8 scan_type);
+	int (*request_disable_scan_type)(struct device *dev, u8 scan_type);
+	struct cyttsp5_core_nonhid_cmd *nonhid_cmd;
+#ifdef TTHE_TUNER_SUPPORT
+	int (*request_tthe_print)(struct device *dev, u8 *buf, int buf_len,
+			const u8 *data_name);
+#endif
+};
+
+struct cyttsp5_features {
+	uint8_t easywake;
+	uint8_t noise_metric;
+	uint8_t tracking_heatmap;
+	uint8_t sensor_data;
+};
+
+struct cyttsp5_module {
+	struct list_head node;
+	char *name;
+	int (*probe)(struct device *dev, void **data);
+	void (*release)(struct device *dev, void *data);
+};
+
+struct cyttsp5_core_data {
+	struct pinctrl *ts_pinctrl;
+	struct pinctrl_state *pinctrl_state_active;
+	struct pinctrl_state *pinctrl_state_suspend;
+	struct pinctrl_state *pinctrl_state_release;
+
+	struct regulator *vdd;
+	struct regulator *vcc_i2c;
+
+	struct list_head node;
+	struct list_head module_list; /* List of probed modules */
+	char core_id[20];
+	struct device *dev;
+	struct list_head atten_list[CY_ATTEN_NUM_ATTEN];
+	struct list_head param_list;
+	struct mutex module_list_lock;
+	struct mutex system_lock;
+	struct mutex adap_lock;
+	struct mutex hid_report_lock;
+	enum cyttsp5_mode mode;
+	spinlock_t spinlock;
+	struct cyttsp5_mt_data md;
+	struct cyttsp5_btn_data bd;
+	struct cyttsp5_proximity_data pd;
+	int phys_num;
+	int number_of_open_input_device;
+	int pm_runtime_usage_count;
+	void *cyttsp5_dynamic_data[CY_MODULE_LAST];
+	struct cyttsp5_platform_data *pdata;
+	struct cyttsp5_core_platform_data *cpdata;
+	const struct cyttsp5_bus_ops *bus_ops;
+	wait_queue_head_t wait_q;
+	enum cyttsp5_sleep_state sleep_state;
+	enum cyttsp5_startup_state startup_state;
+	int irq;
+	bool irq_enabled;
+	bool irq_wake;
+	bool irq_disabled;
+	int large_power_state;
+	u8 easy_wakeup_gesture;
+#ifdef EASYWAKE_TSG6
+	u8 gesture_id;
+	u8 gesture_data_length;
+	u8 gesture_data[80];
+#endif
+	bool wake_initiated_by_device;
+	bool wait_until_wake;
+	u8 panel_id;
+#ifdef NEED_SUSPEND_NOTIFIER
+	/*
+	 * This notifier is used to receive suspend-prepare events.
+	 * When the device is PM runtime suspended, pm_generic_suspend()
+	 * does not call our PM suspend callback on kernels older than
+	 * 3.3.0.
+	 */
+	struct notifier_block pm_notifier;
+#endif
+	struct work_struct startup_work;
+	struct cyttsp5_sysinfo sysinfo;
+	void *exclusive_dev;
+	int exclusive_waits;
+	struct work_struct watchdog_work;
+	struct timer_list watchdog_timer;
+	u16 startup_retry_count;
+	struct cyttsp5_hid_core hid_core;
+	int hid_cmd_state;
+	int hid_reset_cmd_state; /* reset can happen any time */
+	struct cyttsp5_hid_desc hid_desc;
+	struct cyttsp5_hid_report *hid_reports[CY_HID_MAX_REPORTS];
+	int num_hid_reports;
+	struct cyttsp5_features features;
+#define CYTTSP5_PREALLOCATED_CMD_BUFFER 32
+	u8 cmd_buf[CYTTSP5_PREALLOCATED_CMD_BUFFER];
+	u8 input_buf[CY_MAX_INPUT];
+	u8 response_buf[CY_MAX_INPUT];
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend es;
+#elif defined(CONFIG_FB)
+	struct notifier_block fb_notifier;
+	enum cyttsp5_fb_state fb_state;
+#endif
+#ifdef TTHE_TUNER_SUPPORT
+	struct dentry *tthe_debugfs;
+	u8 *tthe_buf;
+	u32 tthe_buf_len;
+	struct mutex tthe_lock;
+	u8 tthe_exit;
+#endif
+#ifdef VERBOSE_DEBUG
+	u8 pr_buf[CY_MAX_PRBUF_SIZE];
+#endif
+	u8 debug_level;
+	u32 watchdog_interval;
+	u8 show_timestamp;
+};
+
+struct gd_sensor {
+	int32_t cm_min;
+	int32_t cm_max;
+	int32_t cm_ave;
+	int32_t cm_min_exclude_edge;
+	int32_t cm_max_exclude_edge;
+	int32_t cm_ave_exclude_edge;
+	int32_t gradient_val;
+};
+
+#ifdef TTHE_TUNER_SUPPORT
+#define CY_CMD_RET_PANEL_IN_DATA_OFFSET 0
+#define CY_CMD_RET_PANEL_ELMNT_SZ_MASK  0x07
+#define CY_CMD_RET_PANEL_HDR            0x0A
+#define CY_CMD_RET_PANEL_ELMNT_SZ_MAX   0x2
+
+enum scan_data_type_list {
+	CY_MUT_RAW,
+	CY_MUT_BASE,
+	CY_MUT_DIFF,
+	CY_SELF_RAW,
+	CY_SELF_BASE,
+	CY_SELF_DIFF,
+	CY_BAL_RAW,
+	CY_BAL_BASE,
+	CY_BAL_DIFF,
+};
+#endif
+
+struct cyttsp5_bus_ops {
+	u16 bustype;
+
+	int (*read_default)(struct device *dev, void *buf, int size);
+	int (*read_default_nosize)(struct device *dev, u8 *buf, u32 max);
+	int (*write_read_specific)(struct device *dev, u8 write_len,
+			u8 *write_buf, u8 *read_buf);
+};
+
+static inline int cyttsp5_adap_read_default(struct cyttsp5_core_data *cd,
+		void *buf, int size)
+{
+	return cd->bus_ops->read_default(cd->dev, buf, size);
+}
+
+static inline int cyttsp5_adap_read_default_nosize(struct cyttsp5_core_data *cd,
+		void *buf, int max)
+{
+	return cd->bus_ops->read_default_nosize(cd->dev, buf, max);
+}
+
+static inline int cyttsp5_adap_write_read_specific(struct cyttsp5_core_data *cd,
+		u8 write_len, u8 *write_buf, u8 *read_buf)
+{
+	return cd->bus_ops->write_read_specific(cd->dev, write_len, write_buf,
+			read_buf);
+}
+
+static inline void *cyttsp5_get_dynamic_data(struct device *dev, int id)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	return cd->cyttsp5_dynamic_data[id];
+}
+
+int request_exclusive(struct cyttsp5_core_data *cd, void *ownptr,
+		int timeout_ms);
+int release_exclusive(struct cyttsp5_core_data *cd, void *ownptr);
+int _cyttsp5_request_hid_output_get_param(struct device *dev,
+		int protect, u8 param_id, u32 *value);
+int _cyttsp5_request_hid_output_set_param(struct device *dev,
+		int protect, u8 param_id, u32 value, u8 size);
+
+static inline int cyttsp5_request_exclusive(struct device *dev, int timeout_ms)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	return request_exclusive(cd, dev, timeout_ms);
+}
+
+static inline int cyttsp5_release_exclusive(struct device *dev)
+{
+	struct cyttsp5_core_data *cd = dev_get_drvdata(dev);
+
+	return release_exclusive(cd, dev);
+}
+
+static inline int cyttsp5_request_nonhid_get_param(struct device *dev,
+		int protect, u8 param_id, u32 *value)
+{
+	return _cyttsp5_request_hid_output_get_param(dev, protect, param_id,
+			value);
+}
+
+static inline int cyttsp5_request_nonhid_set_param(struct device *dev,
+		int protect, u8 param_id, u32 value, u8 size)
+{
+	return _cyttsp5_request_hid_output_set_param(dev, protect, param_id,
+			value, size);
+}
+
+#ifdef VERBOSE_DEBUG
+void cyttsp5_pr_buf(struct device *dev, u8 *dptr, int size,
+		const char *data_name);
+#else
+#define cyttsp5_pr_buf(a, b, c, d) do { } while (0)
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+int cyttsp5_devtree_create_and_get_pdata(struct device *adap_dev);
+int cyttsp5_devtree_clean_pdata(struct device *adap_dev);
+#else
+static inline int cyttsp5_devtree_create_and_get_pdata(struct device *adap_dev)
+{
+	return 0;
+}
+
+static inline int cyttsp5_devtree_clean_pdata(struct device *adap_dev)
+{
+	return 0;
+}
+#endif
+
+int cyttsp5_probe(const struct cyttsp5_bus_ops *ops, struct device *dev,
+		u16 irq, size_t xfer_buf_size);
+int cyttsp5_release(struct cyttsp5_core_data *cd);
+
+struct cyttsp5_core_commands *cyttsp5_get_commands(void);
+struct cyttsp5_core_data *cyttsp5_get_core_data(char *id);
+
+int cyttsp5_mt_release(struct device *dev);
+int cyttsp5_mt_probe(struct device *dev);
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_BUTTON
+int cyttsp5_btn_probe(struct device *dev);
+int cyttsp5_btn_release(struct device *dev);
+#else
+static inline int cyttsp5_btn_probe(struct device *dev) { return 0; }
+static inline int cyttsp5_btn_release(struct device *dev) { return 0; }
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_PROXIMITY
+int cyttsp5_proximity_probe(struct device *dev);
+int cyttsp5_proximity_release(struct device *dev);
+#else
+static inline int cyttsp5_proximity_probe(struct device *dev) { return 0; }
+static inline int cyttsp5_proximity_release(struct device *dev) { return 0; }
+#endif
+
+void cyttsp5_init_function_ptrs(struct cyttsp5_mt_data *md);
+int _cyttsp5_subscribe_attention(struct device *dev,
+	enum cyttsp5_atten_type type, char *id, int (*func)(struct device *),
+	int mode);
+int _cyttsp5_unsubscribe_attention(struct device *dev,
+	enum cyttsp5_atten_type type, char *id, int (*func)(struct device *),
+	int mode);
+struct cyttsp5_sysinfo *_cyttsp5_request_sysinfo(struct device *dev);
+
+extern const struct dev_pm_ops cyttsp5_pm_ops;
+
+int cyttsp5_register_module(struct cyttsp5_module *module);
+void cyttsp5_unregister_module(struct cyttsp5_module *module);
+
+void *cyttsp5_get_module_data(struct device *dev,
+		struct cyttsp5_module *module);
+
+#endif /* _CYTTSP5_REGS_H */
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_spi.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_spi.c
new file mode 100644
index 0000000..3cbfecc
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_spi.c
@@ -0,0 +1,255 @@
+/*
+ * cyttsp5_spi.c
+ * Parade TrueTouch(TM) Standard Product V5 SPI Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include "cyttsp5_regs.h"
+
+#include <linux/spi/spi.h>
+#include <linux/version.h>
+
+#define CY_SPI_WR_OP            0x00 /* r/~w */
+#define CY_SPI_RD_OP            0x01
+#define CY_SPI_BITS_PER_WORD    8
+#define CY_SPI_SYNC_ACK         0x62
+
+#define CY_SPI_CMD_BYTES        0
+#define CY_SPI_DATA_SIZE        (2 * 256)
+#define CY_SPI_DATA_BUF_SIZE    (CY_SPI_CMD_BYTES + CY_SPI_DATA_SIZE)
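+/* The 512-byte data size matches CY_MAX_INPUT, the core's input buffer */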
+
+static void cyttsp5_spi_add_rw_msg(struct spi_message *msg,
+		struct spi_transfer *xfer, u8 *w_header, u8 *r_header, u8 op)
+{
+	xfer->tx_buf = w_header;
+	xfer->rx_buf = r_header;
+	w_header[0] = op;
+	xfer->len = 1;
+	spi_message_add_tail(xfer, msg);
+}
+
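+/*
+ * Each transfer begins with a one-byte full-duplex handshake: the host
+ * clocks out the r/w op code while the device clocks back a sync byte,
+ * which must read as CY_SPI_SYNC_ACK (0x62) for the transfer to be valid.
+ */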
+static int cyttsp5_spi_xfer(struct device *dev, u8 op, u8 *buf, int length)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct spi_message msg;
+	struct spi_transfer xfer[2];
+	u8 w_header[2];
+	u8 r_header[2];
+	int rc;
+
+	memset(xfer, 0, sizeof(xfer));
+
+	spi_message_init(&msg);
+	cyttsp5_spi_add_rw_msg(&msg, &xfer[0], w_header, r_header, op);
+
+	switch (op) {
+	case CY_SPI_RD_OP:
+		xfer[1].rx_buf = buf;
+		xfer[1].len = length;
+		spi_message_add_tail(&xfer[1], &msg);
+		break;
+	case CY_SPI_WR_OP:
+		xfer[1].tx_buf = buf;
+		xfer[1].len = length;
+		spi_message_add_tail(&xfer[1], &msg);
+		break;
+	default:
+		return -EIO;
+	}
+
+	rc = spi_sync(spi, &msg);
+	if (rc < 0) {
+		parade_debug(dev, DEBUG_LEVEL_2, "%s: spi_sync() error %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* r_header is only valid once spi_sync() has succeeded */
+	if (r_header[0] != CY_SPI_SYNC_ACK)
+		return -EIO;
+
+	return 0;
+}
+
+static int cyttsp5_spi_read_default(struct device *dev, void *buf, int size)
+{
+	if (!buf || !size)
+		return 0;
+
+	return cyttsp5_spi_xfer(dev, CY_SPI_RD_OP, buf, size);
+}
+
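+/*
+ * PIP frames are length-prefixed: read the 2-byte little-endian length
+ * first, then re-read the whole frame.  A zero length means the device
+ * has nothing to report.
+ */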
+static int cyttsp5_spi_read_default_nosize(struct device *dev, u8 *buf, u32 max)
+{
+	u32 size;
+	int rc;
+
+	if (!buf)
+		return 0;
+
+	rc = cyttsp5_spi_xfer(dev, CY_SPI_RD_OP, buf, 2);
+	if (rc < 0)
+		return rc;
+
+	size = get_unaligned_le16(&buf[0]);
+	if (!size)
+		return rc;
+
+	if (size > max)
+		return -EINVAL;
+
+	return cyttsp5_spi_read_default(dev, buf, size);
+}
+
+static int cyttsp5_spi_write_read_specific(struct device *dev, u8 write_len,
+		u8 *write_buf, u8 *read_buf)
+{
+	int rc;
+
+	rc = cyttsp5_spi_xfer(dev, CY_SPI_WR_OP, write_buf, write_len);
+	if (rc < 0)
+		return rc;
+
+	if (read_buf)
+		rc = cyttsp5_spi_read_default_nosize(dev, read_buf,
+				CY_SPI_DATA_SIZE);
+
+	return rc;
+}
+
+static struct cyttsp5_bus_ops cyttsp5_spi_bus_ops = {
+	.bustype = BUS_SPI,
+	.read_default = cyttsp5_spi_read_default,
+	.read_default_nosize = cyttsp5_spi_read_default_nosize,
+	.write_read_specific = cyttsp5_spi_write_read_specific,
+};
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+static const struct of_device_id cyttsp5_spi_of_match[] = {
+	{ .compatible = "cy,cyttsp5_spi_adapter", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, cyttsp5_spi_of_match);
+#endif
+
+static int cyttsp5_spi_probe(struct spi_device *spi)
+{
+	struct device *dev = &spi->dev;
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	const struct of_device_id *match;
+#endif
+	int rc;
+
+	/* Set up SPI */
+	spi->bits_per_word = CY_SPI_BITS_PER_WORD;
+	spi->mode = SPI_MODE_0;
+	rc = spi_setup(spi);
+	if (rc < 0) {
+		dev_err(dev, "%s: SPI setup error %d\n", __func__, rc);
+		return rc;
+	}
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	match = of_match_device(of_match_ptr(cyttsp5_spi_of_match), dev);
+	if (match) {
+		rc = cyttsp5_devtree_create_and_get_pdata(dev);
+		if (rc < 0)
+			return rc;
+	}
+#endif
+
+	rc = cyttsp5_probe(&cyttsp5_spi_bus_ops, &spi->dev, spi->irq,
+			  CY_SPI_DATA_BUF_SIZE);
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	if (rc && match)
+		cyttsp5_devtree_clean_pdata(dev);
+#endif
+
+	return rc;
+}
+
+static int cyttsp5_spi_remove(struct spi_device *spi)
+{
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	struct device *dev = &spi->dev;
+	const struct of_device_id *match;
+#endif
+	struct cyttsp5_core_data *cd = dev_get_drvdata(&spi->dev);
+
+	cyttsp5_release(cd);
+
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+	match = of_match_device(of_match_ptr(cyttsp5_spi_of_match), dev);
+	if (match)
+		cyttsp5_devtree_clean_pdata(dev);
+#endif
+
+	return 0;
+}
+
+static const struct spi_device_id cyttsp5_spi_id[] = {
+	{ CYTTSP5_SPI_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, cyttsp5_spi_id);
+
+static struct spi_driver cyttsp5_spi_driver = {
+	.driver = {
+		.name = CYTTSP5_SPI_NAME,
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+		.pm = &cyttsp5_pm_ops,
+#ifdef CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_DEVICETREE_SUPPORT
+		.of_match_table = cyttsp5_spi_of_match,
+#endif
+	},
+	.probe = cyttsp5_spi_probe,
+	.remove = cyttsp5_spi_remove,
+	.id_table = cyttsp5_spi_id,
+};
+
+#if (KERNEL_VERSION(3, 3, 0) <= LINUX_VERSION_CODE)
+module_spi_driver(cyttsp5_spi_driver);
+#else
+static int __init cyttsp5_spi_init(void)
+{
+	int err = spi_register_driver(&cyttsp5_spi_driver);
+
+	pr_info("%s: Parade TTSP SPI Driver (Built %s) rc=%d\n",
+		 __func__, CY_DRIVER_VERSION, err);
+	return err;
+}
+module_init(cyttsp5_spi_init);
+
+static void __exit cyttsp5_spi_exit(void)
+{
+	spi_unregister_driver(&cyttsp5_spi_driver);
+}
+module_exit(cyttsp5_spi_exit);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parade TrueTouch(R) Standard Product SPI Driver");
+MODULE_AUTHOR("Parade Technologies <ttdrivers@paradetech.com>");
diff --git a/drivers/input/touchscreen/cyttsp5/cyttsp5_test_device_access_api.c b/drivers/input/touchscreen/cyttsp5/cyttsp5_test_device_access_api.c
new file mode 100644
index 0000000..f7ee817
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp5/cyttsp5_test_device_access_api.c
@@ -0,0 +1,442 @@
+/*
+ * cyttsp5_test_device_access_api.c
+ * Parade TrueTouch(TM) Standard Product V5 Device Access API test module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015-2019 Parade Technologies
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Technologies at www.paradetech.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/input/cyttsp5_device_access-api.h>
+#include <asm/unaligned.h>
+
+#define BUFFER_SIZE              256
+
+#define COMMAND_GET_SYSTEM_INFO  2
+#define COMMAND_SUSPEND_SCANNING 3
+#define COMMAND_RESUME_SCANNING  4
+#define COMMAND_GET_PARAMETER    5
+#define COMMAND_SET_PARAMETER    6
+
+#define PARAMETER_ACTIVE_DISTANCE_2 0x0B
+
+struct tt_output_report {
+	__le16 reg_address;
+	__le16 length;
+	u8 report_id;
+	u8 reserved;
+	u8 command;
+	u8 parameters[0];
+} __packed;
+
+struct tt_input_report {
+	__le16 length;
+	u8 report_id;
+	u8 reserved;
+	u8 command;
+	u8 return_data[0];
+} __packed;
+
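+/*
+ * Reports are framed as a 2-byte register address (0x04 here) followed
+ * by a length-prefixed payload: a 5-byte header (length, report id,
+ * reserved byte, command) plus parameters, hence the 5 + length payload
+ * size and the 7 + length total transfer size returned below.
+ */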
+static int prepare_tt_output_report(struct tt_output_report *out,
+		u16 length, u8 command)
+{
+	put_unaligned_le16(0x04, &out->reg_address);
+	put_unaligned_le16(5 + length, &out->length);
+
+	out->report_id = 0x2f;
+	out->reserved = 0x00;
+	out->command = command;
+
+	return 7 + length;
+}
+
+static int check_and_parse_tt_input_report(struct tt_input_report *in,
+		u16 *length, u8 *command)
+{
+	if (in->report_id != 0x1f)
+		return -EINVAL;
+
+	*length = get_unaligned_le16(&in->length);
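+	/* only the low 7 bits of the command byte carry the command code */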
+	*command = in->command & 0x7f;
+
+	return 0;
+}
+
+static int prepare_get_system_info_report(u8 *buf)
+{
+	struct tt_output_report *out = (struct tt_output_report *)buf;
+
+	return prepare_tt_output_report(out, 0, COMMAND_GET_SYSTEM_INFO);
+}
+
+static int check_get_system_info_response(u8 *buf, u16 read_length)
+{
+	struct tt_input_report *in = (struct tt_input_report *)buf;
+	u16 length = 0;
+	u8 command = 0;
+
+	if (read_length != 51)
+		return -EINVAL;
+
+	if (check_and_parse_tt_input_report(in, &length, &command)
+			|| command != COMMAND_GET_SYSTEM_INFO
+			|| length != 51)
+		return -EINVAL;
+
+	pr_info("PIP Major Version: %d\n", in->return_data[0]);
+	pr_info("PIP Minor Version: %d\n", in->return_data[1]);
+	pr_info("Touch Firmware Product Id: %d\n",
+			get_unaligned_le16(&in->return_data[2]));
+	pr_info("Touch Firmware Major Version: %d\n", in->return_data[4]);
+	pr_info("Touch Firmware Minor Version: %d\n", in->return_data[5]);
+	pr_info("Touch Firmware Internal Revision Control Number: %d\n",
+			get_unaligned_le32(&in->return_data[6]));
+	pr_info("Customer Specified Firmware/Configuration Version: %d\n",
+			get_unaligned_le16(&in->return_data[10]));
+	pr_info("Bootloader Major Version: %d\n", in->return_data[12]);
+	pr_info("Bootloader Minor Version: %d\n", in->return_data[13]);
+	pr_info("Family ID: 0x%02x\n", in->return_data[14]);
+	pr_info("Revision ID: 0x%02x\n", in->return_data[15]);
+	pr_info("Silicon ID: 0x%02x\n",
+			get_unaligned_le16(&in->return_data[16]));
+	pr_info("Parade Manufacturing ID[0]: 0x%02x\n", in->return_data[18]);
+	pr_info("Parade Manufacturing ID[1]: 0x%02x\n", in->return_data[19]);
+	pr_info("Parade Manufacturing ID[2]: 0x%02x\n", in->return_data[20]);
+	pr_info("Parade Manufacturing ID[3]: 0x%02x\n", in->return_data[21]);
+	pr_info("Parade Manufacturing ID[4]: 0x%02x\n", in->return_data[22]);
+	pr_info("Parade Manufacturing ID[5]: 0x%02x\n", in->return_data[23]);
+	pr_info("Parade Manufacturing ID[6]: 0x%02x\n", in->return_data[24]);
+	pr_info("Parade Manufacturing ID[7]: 0x%02x\n", in->return_data[25]);
+	pr_info("POST Result Code: 0x%02x\n",
+			get_unaligned_le16(&in->return_data[26]));
+
+	pr_info("Number of X Electrodes: %d\n", in->return_data[28]);
+	pr_info("Number of Y Electrodes: %d\n", in->return_data[29]);
+	pr_info("Panel X Axis Length: %d\n",
+			get_unaligned_le16(&in->return_data[30]));
+	pr_info("Panel Y Axis Length: %d\n",
+			get_unaligned_le16(&in->return_data[32]));
+	pr_info("Panel X Axis Resolution: %d\n",
+			get_unaligned_le16(&in->return_data[34]));
+	pr_info("Panel Y Axis Resolution: %d\n",
+			get_unaligned_le16(&in->return_data[36]));
+	pr_info("Panel Pressure Resolution: %d\n",
+			get_unaligned_le16(&in->return_data[38]));
+	pr_info("X_ORG: %d\n", in->return_data[40]);
+	pr_info("Y_ORG: %d\n", in->return_data[41]);
+	pr_info("Panel ID: %d\n", in->return_data[42]);
+	pr_info("Buttons: 0x%02x\n", in->return_data[43]);
+	pr_info("BAL SELF MC: 0x%02x\n", in->return_data[44]);
+	pr_info("Max Number of Touch Records per Refresh Cycle: %d\n",
+			in->return_data[45]);
+
+	return 0;
+}
+
+static int prepare_get_parameter_report(u8 *buf, u8 parameter_id)
+{
+	struct tt_output_report *out = (struct tt_output_report *)buf;
+
+	out->parameters[0] = parameter_id;
+
+	return prepare_tt_output_report(out, 1, COMMAND_GET_PARAMETER);
+}
+
+static int check_get_parameter_response(u8 *buf, u16 read_length,
+		u32 *parameter_value)
+{
+	struct tt_input_report *in = (struct tt_input_report *)buf;
+	u16 length = 0;
+	u8 command = 0;
+	u32 param_value = 0;
+	u8 param_id;
+	u8 param_size = 0;
+
+	if (read_length != 8 && read_length != 9 && read_length != 11)
+		return -EINVAL;
+
+	if (check_and_parse_tt_input_report(in, &length, &command)
+			|| command != COMMAND_GET_PARAMETER
+			|| (length != 8 && length != 9 && length != 11))
+		return -EINVAL;
+
+	param_id = in->return_data[0];
+
+	param_size = in->return_data[1];
+
+	if (param_size == 1)
+		param_value = in->return_data[2];
+	else if (param_size == 2)
+		param_value = get_unaligned_le16(&in->return_data[2]);
+	else if (param_size == 4)
+		param_value = get_unaligned_le32(&in->return_data[2]);
+	else
+		return -EINVAL;
+
+	pr_info("%s: Parameter ID: 0x%02x Value: 0x%02x\n",
+		__func__, param_id, param_value);
+
+	if (parameter_value)
+		*parameter_value = param_value;
+
+	return 0;
+}
+
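+/*
+ * A set-parameter request carries the parameter id, its size in bytes
+ * (1, 2 or 4) and the value in little-endian order, i.e. 2 + size
+ * parameter bytes in total.
+ */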
+static int prepare_set_parameter_report(u8 *buf, u8 parameter_id,
+		u8 parameter_size, u32 parameter_value)
+{
+	struct tt_output_report *out = (struct tt_output_report *)buf;
+
+	out->parameters[0] = parameter_id;
+	out->parameters[1] = parameter_size;
+
+	if (parameter_size == 1)
+		out->parameters[2] = (u8)parameter_value;
+	else if (parameter_size == 2)
+		put_unaligned_le16(parameter_value, &out->parameters[2]);
+	else if (parameter_size == 4)
+		put_unaligned_le32(parameter_value, &out->parameters[2]);
+	else
+		return -EINVAL;
+
+	return prepare_tt_output_report(out, 2 + parameter_size,
+			COMMAND_SET_PARAMETER);
+}
+
+static int check_set_parameter_response(u8 *buf, u16 read_length)
+{
+	struct tt_input_report *in = (struct tt_input_report *)buf;
+	u16 length = 0;
+	u8 command = 0;
+	u8 param_id;
+	u8 param_size = 0;
+
+	if (read_length != 7)
+		return -EINVAL;
+
+	if (check_and_parse_tt_input_report(in, &length, &command)
+			|| command != COMMAND_SET_PARAMETER
+			|| length != 7)
+		return -EINVAL;
+
+	param_id = in->return_data[0];
+	param_size = in->return_data[1];
+
+	pr_info("%s: Parameter ID: 0x%02x Size: 0x%02x\n",
+		__func__, param_id, param_size);
+
+	return 0;
+}
+
+static int prepare_suspend_scanning_report(u8 *buf)
+{
+	struct tt_output_report *out = (struct tt_output_report *)buf;
+
+	return prepare_tt_output_report(out, 0, COMMAND_SUSPEND_SCANNING);
+}
+
+static int check_suspend_scanning_response(u8 *buf, u16 read_length)
+{
+	struct tt_input_report *in = (struct tt_input_report *)buf;
+	u16 length = 0;
+	u8 command = 0;
+
+	if (read_length != 5)
+		return -EINVAL;
+
+	if (check_and_parse_tt_input_report(in, &length, &command)
+			|| command != COMMAND_SUSPEND_SCANNING
+			|| length != 5)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int prepare_resume_scanning_report(u8 *buf)
+{
+	struct tt_output_report *out = (struct tt_output_report *)buf;
+
+	return prepare_tt_output_report(out, 0, COMMAND_RESUME_SCANNING);
+}
+
+static int check_resume_scanning_response(u8 *buf, u16 read_length)
+{
+	struct tt_input_report *in = (struct tt_input_report *)buf;
+	u16 length = 0;
+	u8 command = 0;
+
+	if (read_length != 5)
+		return -EINVAL;
+
+	if (check_and_parse_tt_input_report(in, &length, &command)
+			|| command != COMMAND_RESUME_SCANNING
+			|| length != 5)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void cyttsp5_user_command_async_cont(const char *core_name,
+		u16 read_len, u8 *read_buf, u16 write_len, u8 *write_buf,
+		u16 actual_read_length, int rc)
+{
+	if (rc) {
+		pr_err("%s: suspend scan fails\n", __func__);
+		goto exit;
+	}
+
+	rc = check_suspend_scanning_response(read_buf, actual_read_length);
+	if (rc) {
+		pr_err("%s: check suspend scanning response fails\n", __func__);
+		goto exit;
+	}
+
+	pr_info("%s: suspend scanning succeeds\n", __func__);
+exit:
+	return;
+}
+
+/* Read and write buffers */
+static u8 write_buf[BUFFER_SIZE];
+static u8 read_buf[BUFFER_SIZE];
+
+static uint active_distance;
+module_param(active_distance, uint, 0);
+
+static int __init cyttsp5_test_device_access_api_init(void)
+{
+	u32 initial_active_distance;
+	u16 actual_read_len;
+	int write_len;
+	int rc;
+
+	pr_info("%s: Enter\n", __func__);
+
+	/* CASE	1: Run get system information */
+	write_len = prepare_get_system_info_report(write_buf);
+
+	rc = cyttsp5_device_access_user_command(NULL, sizeof(read_buf),
+			read_buf, write_len, write_buf,
+			&actual_read_len);
+	if (rc)
+		goto exit;
+
+	rc = check_get_system_info_response(read_buf, actual_read_len);
+	if (rc)
+		goto exit;
+
+	/* CASE 2: Run get parameter (Active distance) */
+	write_len = prepare_get_parameter_report(write_buf,
+			PARAMETER_ACTIVE_DISTANCE_2);
+
+	rc = cyttsp5_device_access_user_command(NULL, sizeof(read_buf),
+			read_buf, write_len, write_buf,
+			&actual_read_len);
+	if (rc)
+		goto exit;
+
+	rc = check_get_parameter_response(read_buf, actual_read_len,
+			&initial_active_distance);
+	if (rc)
+		goto exit;
+
+	pr_info("%s: Initial Active Distance: %d\n",
+		__func__, initial_active_distance);
+
+	/* CASE	3: Run set parameter (Active distance) */
+	write_len = prepare_set_parameter_report(write_buf,
+			PARAMETER_ACTIVE_DISTANCE_2, 1,
+			active_distance);
+
+	rc = cyttsp5_device_access_user_command(NULL, sizeof(read_buf),
+			read_buf, write_len, write_buf,
+			&actual_read_len);
+	if (rc)
+		goto exit;
+
+	rc = check_set_parameter_response(read_buf, actual_read_len);
+	if (rc)
+		goto exit;
+
+	pr_info("%s: Active Distance set to %d\n", __func__, active_distance);
+
+	/* CASE	4: Run get parameter (Active distance) */
+	write_len = prepare_get_parameter_report(write_buf,
+			PARAMETER_ACTIVE_DISTANCE_2);
+
+	rc = cyttsp5_device_access_user_command(NULL, sizeof(read_buf),
+			read_buf, write_len, write_buf,
+			&actual_read_len);
+	if (rc)
+		goto exit;
+
+	rc = check_get_parameter_response(read_buf, actual_read_len,
+			&active_distance);
+	if (rc)
+		goto exit;
+
+	pr_info("%s: New Active Distance: %d\n", __func__, active_distance);
+
+	/* CASE 5: Run suspend scanning asynchronously */
+	write_len = prepare_suspend_scanning_report(write_buf);
+
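+	/*
+	 * Run with preemption disabled to exercise the asynchronous variant
+	 * from an atomic context, where the blocking
+	 * cyttsp5_device_access_user_command() used in the cases above must
+	 * not be called.
+	 */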
+	preempt_disable();
+	rc = cyttsp5_device_access_user_command_async(NULL,
+			sizeof(read_buf), read_buf, write_len, write_buf,
+			cyttsp5_user_command_async_cont);
+	preempt_enable();
+	if (rc)
+		goto exit;
+exit:
+	return rc;
+}
+module_init(cyttsp5_test_device_access_api_init);
+
+static void __exit cyttsp5_test_device_access_api_exit(void)
+{
+	u16 actual_read_len;
+	int write_len;
+	int rc;
+
+	/* CASE 6: Run resume scanning */
+	write_len = prepare_resume_scanning_report(write_buf);
+
+	rc = cyttsp5_device_access_user_command(NULL, sizeof(read_buf),
+			read_buf, write_len, write_buf,
+			&actual_read_len);
+	if (rc)
+		goto exit;
+
+	rc = check_resume_scanning_response(read_buf, actual_read_len);
+	if (rc)
+		goto exit;
+
+	pr_info("%s: resume scanning succeeds\n", __func__);
+exit:
+	return;
+}
+module_exit(cyttsp5_test_device_access_api_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Parade TrueTouch(R) Device Access Driver API Tester");
+MODULE_AUTHOR("Parade Technologies <ttdrivers@paradetech.com>");
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index ca22483..c1233d0 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2599,7 +2599,12 @@
 
 	/* Everything is mapped - write the right values into s->dma_address */
 	for_each_sg(sglist, s, nelems, i) {
-		s->dma_address += address + s->offset;
+		/*
+		 * Add in the remaining piece of the scatter-gather offset that
+		 * was masked out when we were determining the physical address
+		 * via (sg_phys(s) & PAGE_MASK) earlier.
+		 */
+		s->dma_address += address + (s->offset & ~PAGE_MASK);
 		s->dma_length   = s->length;
 	}
 
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 157e934..13bbe57 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -318,7 +318,7 @@
 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
 {
 	u64 start = iommu->exclusion_start & PAGE_MASK;
-	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
 	u64 entry;
 
 	if (!iommu->exclusion_start)
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 63110fb..d51734e 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -143,7 +143,7 @@
 		for (tmp = dev; tmp; tmp = tmp->bus->self)
 			level++;
 
-	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+	size = sizeof(*info) + level * sizeof(info->path[0]);
 	if (size <= sizeof(dmar_pci_notify_info_buf)) {
 		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
 	} else {
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 86e3496..28feb17 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1636,6 +1636,9 @@
 	u32 pmen;
 	unsigned long flags;
 
+	if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+		return;
+
 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	pmen = readl(iommu->reg + DMAR_PMEN_REG);
 	pmen &= ~DMA_PMEN_EPM;
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index d68a552..3085b47 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -207,7 +207,8 @@
 		if (dma != virt_to_phys(table))
 			goto out_unmap;
 	}
-	kmemleak_ignore(table);
+	if (lvl == 2)
+		kmemleak_ignore(table);
 	return table;
 
 out_unmap:
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 9305964..c4eb293 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -91,7 +91,6 @@
 #define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
 #define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
 #define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
-#define  SMMU_TLB_FLUSH_ASID(x)          (((x) & 0x7f) << 24)
 #define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
 					  SMMU_TLB_FLUSH_VA_MATCH_SECTION)
 #define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
@@ -194,8 +193,12 @@
 {
 	u32 value;
 
-	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
-		SMMU_TLB_FLUSH_VA_MATCH_ALL;
+	if (smmu->soc->num_asids == 4)
+		value = (asid & 0x3) << 29;
+	else
+		value = (asid & 0x7f) << 24;
+
+	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
 	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
 }
 
@@ -205,8 +208,12 @@
 {
 	u32 value;
 
-	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
-		SMMU_TLB_FLUSH_VA_SECTION(iova);
+	if (smmu->soc->num_asids == 4)
+		value = (asid & 0x3) << 29;
+	else
+		value = (asid & 0x7f) << 24;
+
+	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
 	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
 }
 
@@ -216,8 +223,12 @@
 {
 	u32 value;
 
-	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
-		SMMU_TLB_FLUSH_VA_GROUP(iova);
+	if (smmu->soc->num_asids == 4)
+		value = (asid & 0x3) << 29;
+	else
+		value = (asid & 0x7f) << 24;
+
+	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
 	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
 }
 
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
index aa72907..0390603 100644
--- a/drivers/irqchip/irq-ath79-misc.c
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -22,6 +22,15 @@
 #define AR71XX_RESET_REG_MISC_INT_ENABLE	4
 
 #define ATH79_MISC_IRQ_COUNT			32
+#define ATH79_MISC_PERF_IRQ			5
+
+static int ath79_perfcount_irq;
+
+int get_c0_perfcount_int(void)
+{
+	return ath79_perfcount_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 static void ath79_misc_irq_handler(struct irq_desc *desc)
 {
@@ -113,6 +122,8 @@
 {
 	void __iomem *base = domain->host_data;
 
+	ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
+
 	/* Disable and clear all interrupts */
 	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
 	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 05d87f6..406bfe6 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -160,6 +160,9 @@
 	void __iomem *base = d->chip_data;
 	u32 val;
 
+	if (!msg->address_lo && !msg->address_hi)
+		return;
+
 	base += get_mbigen_vec_reg(d->hwirq);
 	val = readl_relaxed(base);
 
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 480c2d7..8feb8e9 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4370,7 +4370,8 @@
 	if (m->clock2)
 		test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
 
-	if (ent->device == 0xB410) {
+	if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
+	    ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
 		test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
 		test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
 		test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 99e5f97..f96b8f2 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -712,10 +712,10 @@
 	struct sock *sk = sock->sk;
 	int err = 0;
 
-	if (!maddr || maddr->family != AF_ISDN)
+	if (addr_len < sizeof(struct sockaddr_mISDN))
 		return -EINVAL;
 
-	if (addr_len < sizeof(struct sockaddr_mISDN))
+	if (!maddr || maddr->family != AF_ISDN)
 		return -EINVAL;
 
 	lock_sock(sk);
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 5377f22..e265595 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -201,7 +201,7 @@
 
 	if (!fw) {
 		dev_err(dev, "firmware request failed\n");
-		goto out;
+		return;
 	}
 
 	/* handling firmware data is chip dependent */
@@ -214,9 +214,9 @@
 
 	mutex_unlock(&chip->lock);
 
-out:
 	/* firmware should be released for other channel use */
 	release_firmware(chip->fw);
+	chip->fw = NULL;
 }
 
 static int lp55xx_request_firmware(struct lp55xx_chip *chip)
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 09a7cffb..896b38f 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -488,6 +488,7 @@
 	const struct i2c_device_id *id)
 {
 	int devid;
+	const struct of_device_id *of_id;
 	struct pca9532_data *data = i2c_get_clientdata(client);
 	struct pca9532_platform_data *pca9532_pdata =
 			dev_get_platdata(&client->dev);
@@ -503,8 +504,11 @@
 			dev_err(&client->dev, "no platform data\n");
 			return -EINVAL;
 		}
-		devid = (int)(uintptr_t)of_match_device(
-			of_pca9532_leds_match, &client->dev)->data;
+		of_id = of_match_device(of_pca9532_leds_match,
+				&client->dev);
+		if (unlikely(!of_id))
+			return -EINVAL;
+		devid = (int)(uintptr_t) of_id->data;
 	} else {
 		devid = id->driver_data;
 	}
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index 410c39c..4600f95 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
 #include "../leds.h"
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 121df39..fadad29 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -582,4 +582,17 @@
 	  any more after all the data blocks it covers have been verified anyway.
 
 	  If unsure, say N.
+
+config DM_BOW
+	tristate "Backup block device"
+	depends on BLK_DEV_DM
+	select DM_BUFIO
+	---help---
+	  This device-mapper target takes a device and keeps a log of all
+	  changes, using free blocks identified by issuing a trim command.
+	  The original state can then be restored by running a command-line
+	  utility, or the changes committed by simply replacing the target.
+
+	  If unsure, say N.
+
 endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index c8dec9c..6535d500 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -62,6 +62,7 @@
 obj-$(CONFIG_DM_LOG_WRITES)	+= dm-log-writes.o
 obj-$(CONFIG_DM_REQ_CRYPT)	+= dm-req-crypt.o
 obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o
+obj-$(CONFIG_DM_BOW)		+= dm-bow.o
 
 ifeq ($(CONFIG_DM_UEVENT),y)
 dm-mod-objs			+= dm-uevent.o
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 08f20b7..c76a017 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -513,11 +513,11 @@
 				  ca->sb.nr_this_dev);
 	}
 
-	bkey_init(k);
-	SET_KEY_PTRS(k, n);
-
-	if (n)
+	if (n) {
+		bkey_init(k);
+		SET_KEY_PTRS(k, n);
 		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+	}
 out:
 	if (!journal_full(&c->journal))
 		__closure_wake_up(&c->journal.wait);
@@ -642,6 +642,9 @@
 		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
 	}
 
+	/* If KEY_PTRS(k) == 0, this jset would be silently lost */
+	BUG_ON(i == 0);
+
 	atomic_dec_bug(&fifo_back(&c->journal.pin));
 	bch_journal_next(&c->journal);
 	journal_reclaim(c);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 11c9953..8031bb2 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1357,6 +1357,7 @@
 	bch_btree_cache_free(c);
 	bch_journal_free(c);
 
+	mutex_lock(&bch_register_lock);
 	for_each_cache(ca, c, i)
 		if (ca) {
 			ca->set = NULL;
@@ -1379,7 +1380,6 @@
 		mempool_destroy(c->search);
 	kfree(c->devices);
 
-	mutex_lock(&bch_register_lock);
 	list_del(&c->list);
 	mutex_unlock(&bch_register_lock);
 
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 5a5c1f1..463ce67 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -215,7 +215,9 @@
 	d_strtoul(writeback_rate_d_term);
 	d_strtoul_nonzero(writeback_rate_p_term_inverse);
 
-	d_strtoi_h(sequential_cutoff);
+	sysfs_strtoul_clamp(sequential_cutoff,
+			    dc->sequential_cutoff,
+			    0, UINT_MAX);
 	d_strtoi_h(readahead);
 
 	if (attr == &sysfs_clear_stats)
@@ -645,8 +647,17 @@
 		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
 
 	/* See count_io_errors() for why 88 */
-	if (attr == &sysfs_io_error_halflife)
-		c->error_decay = strtoul_or_return(buf) / 88;
+	if (attr == &sysfs_io_error_halflife) {
+		unsigned long v = 0;
+		ssize_t ret;
+
+		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
+		if (!ret) {
+			c->error_decay = v / 88;
+			return size;
+		}
+		return ret;
+	}
 
 	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
 	sysfs_strtoul(verify,			c->verify);
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
index 0526fe9..e7a3c12 100644
--- a/drivers/md/bcache/sysfs.h
+++ b/drivers/md/bcache/sysfs.h
@@ -80,9 +80,16 @@
 
 #define sysfs_strtoul_clamp(file, var, min, max)			\
 do {									\
-	if (attr == &sysfs_ ## file)					\
-		return strtoul_safe_clamp(buf, var, min, max)		\
-			?: (ssize_t) size;				\
+	if (attr == &sysfs_ ## file) {					\
+		unsigned long v = 0;					\
+		ssize_t ret;						\
+		ret = strtoul_safe_clamp(buf, v, min, max);		\
+		if (!ret) {						\
+			var = v;					\
+			return size;					\
+		}							\
+		return ret;						\
+	}								\
 } while (0)
 
 #define strtoul_or_return(cp)						\
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index cdf8d25..6fb6dd6 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -68,6 +68,9 @@
 	    in_use > CUTOFF_WRITEBACK_SYNC)
 		return false;
 
+	if (bio_op(bio) == REQ_OP_DISCARD)
+		return false;
+
 	if (dc->partial_stripes_expensive &&
 	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
 				    bio_sectors(bio)))
diff --git a/drivers/md/dm-bow.c b/drivers/md/dm-bow.c
new file mode 100644
index 0000000..4e7f6c0
--- /dev/null
+++ b/drivers/md/dm-bow.c
@@ -0,0 +1,1233 @@
+/*
+ * Copyright (C) 2018 Google Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "dm-bufio.h"
+#include "dm-core.h"
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "bow"
+#define SECTOR_SIZE 512
+
+struct log_entry {
+	u64 source;
+	u64 dest;
+	u32 size;
+	u32 checksum;
+} __packed;
+
+struct log_sector {
+	u32 magic;
+	u16 header_version;
+	u16 header_size;
+	u32 block_size;
+	u32 count;
+	u32 sequence;
+	sector_t sector0;
+	struct log_entry entries[];
+} __packed;
+
+/*
+ * MAGIC is "BOW" in ASCII: 0x00574f42 is 'B', 'O', 'W', '\0' little-endian
+ */
+#define MAGIC 0x00574f42
+#define HEADER_VERSION 0x0100
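+
+/*
+ * Lifecycle sketch (assumed typical usage; the sysfs path depends on the
+ * dm device number):
+ *
+ *	dmsetup create bow --table "0 <sectors> bow <backing-dev>"
+ *	echo 1 > /sys/block/dm-N/bow/state	(TRIM -> CHECKPOINT)
+ *	... writes proceed; originals are backed up into trimmed space ...
+ *	echo 2 > /sys/block/dm-N/bow/state	(CHECKPOINT -> COMMITTED)
+ *
+ * To roll back instead of committing, a userspace utility replays the
+ * log anchored at sector 0 to restore the original contents.
+ */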
+
+/*
+ * A sorted set of ranges representing the state of the data on the device.
+ * Use an rb_tree for fast lookup of a given sector
+ * Consecutive ranges are always of different type - operations on this
+ * set must merge matching consecutive ranges.
+ *
+ * Top range is always of type TOP
+ */
+struct bow_range {
+	struct rb_node		node;
+	sector_t		sector;
+	enum {
+		INVALID,	/* Type not set */
+		SECTOR0,	/* First sector - holds log record */
+		SECTOR0_CURRENT,/* Live contents of sector0 */
+		UNCHANGED,	/* Original contents */
+		TRIMMED,	/* Range has been trimmed */
+		CHANGED,	/* Range has been changed */
+		BACKUP,		/* Range is being used as a backup */
+		TOP,		/* Final range - sector is size of device */
+	} type;
+	struct list_head	trimmed_list; /* list of TRIMMED ranges */
+};
+
+static const char * const readable_type[] = {
+	"Invalid",
+	"Sector0",
+	"Sector0_current",
+	"Unchanged",
+	"Free",
+	"Changed",
+	"Backup",
+	"Top",
+};
+
+enum state {
+	TRIM,
+	CHECKPOINT,
+	COMMITTED,
+};
+
+struct bow_context {
+	struct dm_dev *dev;
+	u32 block_size;
+	u32 block_shift;
+	struct workqueue_struct *workqueue;
+	struct dm_bufio_client *bufio;
+	struct mutex ranges_lock; /* Hold to access this struct and/or ranges */
+	struct rb_root ranges;
+	struct dm_kobject_holder kobj_holder;	/* for sysfs attributes */
+	atomic_t state; /* One of the enum state values above */
+	u64 trims_total;
+	struct log_sector *log_sector;
+	struct list_head trimmed_list;
+	bool forward_trims;
+};
+
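+/*
+ * Must not be called on the TOP range itself; the TOP sentinel guarantees
+ * rb_next() is non-NULL for every other range.
+ */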
+sector_t range_top(struct bow_range *br)
+{
+	return container_of(rb_next(&br->node), struct bow_range, node)
+		->sector;
+}
+
+u64 range_size(struct bow_range *br)
+{
+	return (range_top(br) - br->sector) * SECTOR_SIZE;
+}
+
+static sector_t bvec_top(struct bvec_iter *bi_iter)
+{
+	return bi_iter->bi_sector + bi_iter->bi_size / SECTOR_SIZE;
+}
+
+/*
+ * Find the first range that overlaps with bi_iter
+ * bi_iter is set to the size of the overlapping sub-range
+ */
+static struct bow_range *find_first_overlapping_range(struct rb_root *ranges,
+						      struct bvec_iter *bi_iter)
+{
+	struct rb_node *node = ranges->rb_node;
+	struct bow_range *br;
+
+	while (node) {
+		br = container_of(node, struct bow_range, node);
+
+		if (br->sector <= bi_iter->bi_sector
+		    && bi_iter->bi_sector < range_top(br))
+			break;
+
+		if (bi_iter->bi_sector < br->sector)
+			node = node->rb_left;
+		else
+			node = node->rb_right;
+	}
+
+	WARN_ON(!node);
+	if (!node)
+		return NULL;
+
+	if (range_top(br) - bi_iter->bi_sector
+	    < bi_iter->bi_size >> SECTOR_SHIFT)
+		bi_iter->bi_size = (range_top(br) - bi_iter->bi_sector)
+			<< SECTOR_SHIFT;
+
+	return br;
+}
+
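+/*
+ * Insert new_br immediately before existing in the tree: link it at
+ * existing's left slot, or at the rightmost slot of existing's left
+ * subtree, so an in-order walk visits new_br first.
+ */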
+void add_before(struct rb_root *ranges, struct bow_range *new_br,
+		struct bow_range *existing)
+{
+	struct rb_node *parent = &(existing->node);
+	struct rb_node **link = &(parent->rb_left);
+
+	while (*link) {
+		parent = *link;
+		link = &((*link)->rb_right);
+	}
+
+	rb_link_node(&new_br->node, parent, link);
+	rb_insert_color(&new_br->node, ranges);
+}
+
+/*
+ * Given a range br returned by find_first_overlapping_range, split br into a
+ * leading range, a range matching the bi_iter and a trailing range.
+ * Leading and trailing may end up size 0 and will then be deleted. The
+ * new range matching the bi_iter is then returned and should have its type
+ * and type specific fields populated.
+ * If bi_iter runs off the end of the range, bi_iter is truncated accordingly
+ */
+static int split_range(struct bow_context *bc, struct bow_range **br,
+		       struct bvec_iter *bi_iter)
+{
+	struct bow_range *new_br;
+
+	if (bi_iter->bi_sector < (*br)->sector) {
+		WARN_ON(true);
+		return -EIO;
+	}
+
+	if (bi_iter->bi_sector > (*br)->sector) {
+		struct bow_range *leading_br =
+			kzalloc(sizeof(*leading_br), GFP_KERNEL);
+
+		if (!leading_br)
+			return -ENOMEM;
+
+		*leading_br = **br;
+		if (leading_br->type == TRIMMED)
+			list_add(&leading_br->trimmed_list, &bc->trimmed_list);
+
+		add_before(&bc->ranges, leading_br, *br);
+		(*br)->sector = bi_iter->bi_sector;
+	}
+
+	if (bvec_top(bi_iter) >= range_top(*br)) {
+		bi_iter->bi_size = (range_top(*br) - (*br)->sector)
+					* SECTOR_SIZE;
+		return 0;
+	}
+
+	/* new_br will be the beginning, existing br will be the tail */
+	new_br = kzalloc(sizeof(*new_br), GFP_KERNEL);
+	if (!new_br)
+		return -ENOMEM;
+
+	new_br->sector = (*br)->sector;
+	(*br)->sector = bvec_top(bi_iter);
+	add_before(&bc->ranges, new_br, *br);
+	*br = new_br;
+
+	return 0;
+}
+
+/*
+ * Sets type of a range. May merge range into surrounding ranges
+ * Since br may be invalidated, always sets br to NULL to prevent
+ * usage after this is called
+ */
+static void set_type(struct bow_context *bc, struct bow_range **br, int type)
+{
+	struct bow_range *prev = container_of(rb_prev(&(*br)->node),
+						      struct bow_range, node);
+	struct bow_range *next = container_of(rb_next(&(*br)->node),
+						      struct bow_range, node);
+
+	if ((*br)->type == TRIMMED) {
+		bc->trims_total -= range_size(*br);
+		list_del(&(*br)->trimmed_list);
+	}
+
+	if (type == TRIMMED) {
+		bc->trims_total += range_size(*br);
+		list_add(&(*br)->trimmed_list, &bc->trimmed_list);
+	}
+
+	(*br)->type = type;
+
+	if (next->type == type) {
+		if (type == TRIMMED)
+			list_del(&next->trimmed_list);
+		rb_erase(&next->node, &bc->ranges);
+		kfree(next);
+	}
+
+	if (prev->type == type) {
+		if (type == TRIMMED)
+			list_del(&(*br)->trimmed_list);
+		rb_erase(&(*br)->node, &bc->ranges);
+		kfree(*br);
+	}
+
+	*br = NULL;
+}
+
+static struct bow_range *find_free_range(struct bow_context *bc)
+{
+	if (list_empty(&bc->trimmed_list)) {
+		DMERR("Unable to find free space to back up to");
+		return NULL;
+	}
+
+	return list_first_entry(&bc->trimmed_list, struct bow_range,
+				trimmed_list);
+}
+
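+/*
+ * Convert a sector to a dm-bufio block number. Assumes block_size is a
+ * power of two >= SECTOR_SIZE; warns if the sector is not block aligned.
+ */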
+static sector_t sector_to_page(struct bow_context const *bc, sector_t sector)
+{
+	WARN_ON((sector & (((sector_t)1 << (bc->block_shift - SECTOR_SHIFT)) - 1))
+		!= 0);
+	return sector >> (bc->block_shift - SECTOR_SHIFT);
+}
+
+static int copy_data(struct bow_context const *bc,
+		     struct bow_range *source, struct bow_range *dest,
+		     u32 *checksum)
+{
+	int i;
+
+	if (range_size(source) != range_size(dest)) {
+		WARN_ON(1);
+		return -EIO;
+	}
+
+	if (checksum)
+		*checksum = sector_to_page(bc, source->sector);
+
+	for (i = 0; i < range_size(source) >> bc->block_shift; ++i) {
+		struct dm_buffer *read_buffer, *write_buffer;
+		u8 *read, *write;
+		sector_t page = sector_to_page(bc, source->sector) + i;
+
+		read = dm_bufio_read(bc->bufio, page, &read_buffer);
+		if (IS_ERR(read)) {
+			DMERR("Cannot read page %llu",
+			      (unsigned long long)page);
+			return PTR_ERR(read);
+		}
+
+		if (checksum)
+			*checksum = crc32(*checksum, read, bc->block_size);
+
+		write = dm_bufio_new(bc->bufio,
+				     sector_to_page(bc, dest->sector) + i,
+				     &write_buffer);
+		if (IS_ERR(write)) {
+			DMERR("Cannot write sector");
+			dm_bufio_release(read_buffer);
+			return PTR_ERR(write);
+		}
+
+		memcpy(write, read, bc->block_size);
+
+		dm_bufio_mark_buffer_dirty(write_buffer);
+		dm_bufio_release(write_buffer);
+		dm_bufio_release(read_buffer);
+	}
+
+	dm_bufio_write_dirty_buffers(bc->bufio);
+	return 0;
+}
+
+/****** logging functions ******/
+
+static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
+			 unsigned int size, u32 checksum);
+
+static int backup_log_sector(struct bow_context *bc)
+{
+	struct bow_range *first_br, *free_br;
+	struct bvec_iter bi_iter;
+	u32 checksum = 0;
+	int ret;
+
+	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
+
+	if (first_br->type != SECTOR0) {
+		WARN_ON(1);
+		return -EIO;
+	}
+
+	if (range_size(first_br) != bc->block_size) {
+		WARN_ON(1);
+		return -EIO;
+	}
+
+	free_br = find_free_range(bc);
+	/* No space left - return this error to userspace */
+	if (!free_br)
+		return -ENOSPC;
+	bi_iter.bi_sector = free_br->sector;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &free_br, &bi_iter);
+	if (ret)
+		return ret;
+	if (bi_iter.bi_size != bc->block_size) {
+		WARN_ON(1);
+		return -EIO;
+	}
+
+	ret = copy_data(bc, first_br, free_br, &checksum);
+	if (ret)
+		return ret;
+
+	bc->log_sector->count = 0;
+	bc->log_sector->sequence++;
+	ret = add_log_entry(bc, first_br->sector, free_br->sector,
+			    range_size(first_br), checksum);
+	if (ret)
+		return ret;
+
+	set_type(bc, &free_br, BACKUP);
+	return 0;
+}
+
+static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
+			 unsigned int size, u32 checksum)
+{
+	struct dm_buffer *sector_buffer;
+	u8 *sector;
+
+	if (sizeof(struct log_sector)
+	    + sizeof(struct log_entry) * (bc->log_sector->count + 1)
+		> bc->block_size) {
+		int ret = backup_log_sector(bc);
+
+		if (ret)
+			return ret;
+	}
+
+	sector = dm_bufio_new(bc->bufio, 0, &sector_buffer);
+	if (IS_ERR(sector)) {
+		DMERR("Cannot write boot sector");
+		dm_bufio_release(sector_buffer);
+		return -ENOSPC;
+	}
+
+	bc->log_sector->entries[bc->log_sector->count].source = source;
+	bc->log_sector->entries[bc->log_sector->count].dest = dest;
+	bc->log_sector->entries[bc->log_sector->count].size = size;
+	bc->log_sector->entries[bc->log_sector->count].checksum = checksum;
+	bc->log_sector->count++;
+
+	memcpy(sector, bc->log_sector, bc->block_size);
+	dm_bufio_mark_buffer_dirty(sector_buffer);
+	dm_bufio_release(sector_buffer);
+	dm_bufio_write_dirty_buffers(bc->bufio);
+	return 0;
+}
+
+static int prepare_log(struct bow_context *bc)
+{
+	struct bow_range *free_br, *first_br;
+	struct bvec_iter bi_iter;
+	u32 checksum = 0;
+	int ret;
+
+	/* Carve out first sector as log sector */
+	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
+	if (first_br->type != UNCHANGED) {
+		WARN_ON(1);
+		return -EIO;
+	}
+
+	if (range_size(first_br) < bc->block_size) {
+		WARN_ON(1);
+		return -EIO;
+	}
+	bi_iter.bi_sector = 0;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &first_br, &bi_iter);
+	if (ret)
+		return ret;
+	first_br->type = SECTOR0;
+	if (range_size(first_br) != bc->block_size) {
+		WARN_ON(1);
+		return -EIO;
+	}
+
+	/* Find free sector for active sector0 reads/writes */
+	free_br = find_free_range(bc);
+	if (!free_br)
+		return -ENOSPC;
+	bi_iter.bi_sector = free_br->sector;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &free_br, &bi_iter);
+	if (ret)
+		return ret;
+	free_br->type = SECTOR0_CURRENT;
+
+	/* Copy data */
+	ret = copy_data(bc, first_br, free_br, NULL);
+	if (ret)
+		return ret;
+
+	bc->log_sector->sector0 = free_br->sector;
+
+	/* Find free sector to back up original sector zero */
+	free_br = find_free_range(bc);
+	if (!free_br)
+		return -ENOSPC;
+	bi_iter.bi_sector = free_br->sector;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &free_br, &bi_iter);
+	if (ret)
+		return ret;
+
+	/* Back up */
+	ret = copy_data(bc, first_br, free_br, &checksum);
+	if (ret)
+		return ret;
+
+	/*
+	 * Set up our replacement boot sector - it will get written when we
+	 * add the first log entry, which we do immediately
+	 */
+	bc->log_sector->magic = MAGIC;
+	bc->log_sector->header_version = HEADER_VERSION;
+	bc->log_sector->header_size = sizeof(*bc->log_sector);
+	bc->log_sector->block_size = bc->block_size;
+	bc->log_sector->count = 0;
+	bc->log_sector->sequence = 0;
+
+	/* Add log entry */
+	ret = add_log_entry(bc, first_br->sector, free_br->sector,
+			    range_size(first_br), checksum);
+	if (ret)
+		return ret;
+
+	set_type(bc, &free_br, BACKUP);
+	return 0;
+}
+
+static struct bow_range *find_sector0_current(struct bow_context *bc)
+{
+	struct bvec_iter bi_iter;
+
+	bi_iter.bi_sector = bc->log_sector->sector0;
+	bi_iter.bi_size = bc->block_size;
+	return find_first_overlapping_range(&bc->ranges, &bi_iter);
+}
+
+/****** sysfs interface functions ******/
+
+static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	struct bow_context *bc = container_of(kobj, struct bow_context,
+					      kobj_holder.kobj);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&bc->state));
+}
+
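+/*
+ * Accepts a single ASCII digit. Only forward transitions of exactly one
+ * step are allowed: TRIM (0) -> CHECKPOINT (1) -> COMMITTED (2).
+ */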
+static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct bow_context *bc = container_of(kobj, struct bow_context,
+					      kobj_holder.kobj);
+	enum state state, original_state;
+	int ret;
+
+	state = buf[0] - '0';
+	if (state < TRIM || state > COMMITTED) {
+		DMERR("State value %d out of range", state);
+		return -EINVAL;
+	}
+
+	mutex_lock(&bc->ranges_lock);
+	original_state = atomic_read(&bc->state);
+	if (state != original_state + 1) {
+		DMERR("Invalid state change from %d to %d",
+		      original_state, state);
+		ret = -EINVAL;
+		goto bad;
+	}
+
+	DMINFO("Switching to state %s", state == CHECKPOINT ? "Checkpoint"
+	       : state == COMMITTED ? "Committed" : "Unknown");
+
+	if (state == CHECKPOINT) {
+		ret = prepare_log(bc);
+		if (ret) {
+			DMERR("Failed to switch to checkpoint state");
+			goto bad;
+		}
+	} else if (state == COMMITTED) {
+		struct bow_range *br = find_sector0_current(bc);
+		struct bow_range *sector0_br =
+			container_of(rb_first(&bc->ranges), struct bow_range,
+				     node);
+
+		ret = copy_data(bc, br, sector0_br, NULL);
+		if (ret) {
+			DMERR("Failed to switch to committed state");
+			goto bad;
+		}
+	}
+	atomic_inc(&bc->state);
+	ret = count;
+
+bad:
+	mutex_unlock(&bc->ranges_lock);
+	return ret;
+}
+
+static ssize_t free_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	struct bow_context *bc = container_of(kobj, struct bow_context,
+					      kobj_holder.kobj);
+	u64 trims_total;
+
+	mutex_lock(&bc->ranges_lock);
+	trims_total = bc->trims_total;
+	mutex_unlock(&bc->ranges_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", trims_total);
+}
+
+static struct kobj_attribute attr_state = __ATTR_RW(state);
+static struct kobj_attribute attr_free = __ATTR_RO(free);
+
+static struct attribute *bow_attrs[] = {
+	&attr_state.attr,
+	&attr_free.attr,
+	NULL
+};
+
+static struct kobj_type bow_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_attrs = bow_attrs,
+	.release = dm_kobject_release
+};
+
+/****** constructor/destructor ******/
+
+static void dm_bow_dtr(struct dm_target *ti)
+{
+	struct bow_context *bc = (struct bow_context *) ti->private;
+	struct kobject *kobj;
+
+	while (rb_first(&bc->ranges)) {
+		struct bow_range *br = container_of(rb_first(&bc->ranges),
+						    struct bow_range, node);
+
+		rb_erase(&br->node, &bc->ranges);
+		kfree(br);
+	}
+	if (bc->workqueue)
+		destroy_workqueue(bc->workqueue);
+	if (bc->bufio)
+		dm_bufio_client_destroy(bc->bufio);
+
+	kobj = &bc->kobj_holder.kobj;
+	if (kobj->state_initialized) {
+		kobject_put(kobj);
+		wait_for_completion(dm_get_completion_from_kobject(kobj));
+	}
+
+	kfree(bc->log_sector);
+	kfree(bc);
+}
+
+static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct bow_context *bc;
+	struct bow_range *br;
+	int ret;
+	struct mapped_device *md = dm_table_get_md(ti->table);
+
+	if (argc != 1) {
+		ti->error = "Invalid argument count";
+		return -EINVAL;
+	}
+
+	bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+	if (!bc) {
+		ti->error = "Cannot allocate bow context";
+		return -ENOMEM;
+	}
+
+	ti->num_flush_bios = 1;
+	ti->num_discard_bios = 1;
+	ti->num_write_same_bios = 1;
+	ti->private = bc;
+
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+			    &bc->dev);
+	if (ret) {
+		ti->error = "Device lookup failed";
+		goto bad;
+	}
+
+	if (bc->dev->bdev->bd_queue->limits.max_discard_sectors == 0) {
+		bc->dev->bdev->bd_queue->limits.discard_granularity = 1 << 12;
+		bc->dev->bdev->bd_queue->limits.max_hw_discard_sectors = 1 << 15;
+		bc->dev->bdev->bd_queue->limits.max_discard_sectors = 1 << 15;
+		bc->forward_trims = false;
+	} else {
+		bc->forward_trims = true;
+	}
+
+	bc->block_size = bc->dev->bdev->bd_queue->limits.logical_block_size;
+	bc->block_shift = ilog2(bc->block_size);
+	bc->log_sector = kzalloc(bc->block_size, GFP_KERNEL);
+	if (!bc->log_sector) {
+		ti->error = "Cannot allocate log sector";
+		goto bad;
+	}
+
+	init_completion(&bc->kobj_holder.completion);
+	ret = kobject_init_and_add(&bc->kobj_holder.kobj, &bow_ktype,
+				   &disk_to_dev(dm_disk(md))->kobj, "%s",
+				   "bow");
+	if (ret) {
+		ti->error = "Cannot create sysfs node";
+		goto bad;
+	}
+
+	mutex_init(&bc->ranges_lock);
+	bc->ranges = RB_ROOT;
+	bc->bufio = dm_bufio_client_create(bc->dev->bdev, bc->block_size, 1, 0,
+					   NULL, NULL);
+	if (IS_ERR(bc->bufio)) {
+		ti->error = "Cannot initialize dm-bufio";
+		ret = PTR_ERR(bc->bufio);
+		bc->bufio = NULL;
+		goto bad;
+	}
+
+	bc->workqueue = alloc_workqueue("dm-bow",
+					WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM
+					| WQ_UNBOUND, num_online_cpus());
+	if (!bc->workqueue) {
+		ti->error = "Cannot allocate workqueue";
+		ret = -ENOMEM;
+		goto bad;
+	}
+
+	INIT_LIST_HEAD(&bc->trimmed_list);
+
+	br = kzalloc(sizeof(*br), GFP_KERNEL);
+	if (!br) {
+		ti->error = "Cannot allocate ranges";
+		ret = -ENOMEM;
+		goto bad;
+	}
+
+	br->sector = ti->len;
+	br->type = TOP;
+	rb_link_node(&br->node, NULL, &bc->ranges.rb_node);
+	rb_insert_color(&br->node, &bc->ranges);
+
+	br = kzalloc(sizeof(*br), GFP_KERNEL);
+	if (!br) {
+		ti->error = "Cannot allocate ranges";
+		ret = -ENOMEM;
+		goto bad;
+	}
+
+	br->sector = 0;
+	br->type = UNCHANGED;
+	rb_link_node(&br->node, bc->ranges.rb_node,
+		     &bc->ranges.rb_node->rb_left);
+	rb_insert_color(&br->node, &bc->ranges);
+
+	ti->discards_supported = true;
+
+	return 0;
+
+bad:
+	dm_bow_dtr(ti);
+	return ret;
+}
+
+/****** Handle writes ******/
+
+static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br,
+				   struct bvec_iter *bi_iter,
+				   bool record_checksum)
+{
+	struct bow_range *backup_br;
+	struct bvec_iter backup_bi;
+	sector_t log_source, log_dest;
+	unsigned int log_size;
+	u32 checksum = 0;
+	int ret;
+	int original_type;
+	sector_t sector0;
+
+	/* Find a free range */
+	backup_br = find_free_range(bc);
+	if (!backup_br)
+		return -ENOSPC;
+
+	/* Carve out a backup range. This may be smaller than the br given */
+	backup_bi.bi_sector = backup_br->sector;
+	backup_bi.bi_size = min(range_size(backup_br), (u64) bi_iter->bi_size);
+	ret = split_range(bc, &backup_br, &backup_bi);
+	if (ret)
+		return ret;
+
+	/*
+	 * Carve out a changed range. This will not be smaller than the backup
+	 * br since the backup br is smaller than the source range and iterator
+	 */
+	bi_iter->bi_size = backup_bi.bi_size;
+	ret = split_range(bc, &br, bi_iter);
+	if (ret)
+		return ret;
+	if (range_size(br) != range_size(backup_br)) {
+		WARN_ON(1);
+		return -EIO;
+	}
+
+	/* Copy data over */
+	ret = copy_data(bc, br, backup_br, record_checksum ? &checksum : NULL);
+	if (ret)
+		return ret;
+
+	/* Add an entry to the log */
+	log_source = br->sector;
+	log_dest = backup_br->sector;
+	log_size = range_size(br);
+
+	/*
+	 * Set the types. Note that since set_type also amalgamates ranges
+	 * we have to set both sectors to their final type before calling
+	 * set_type on either
+	 */
+	original_type = br->type;
+	sector0 = backup_br->sector;
+	if (backup_br->type == TRIMMED)
+		list_del(&backup_br->trimmed_list);
+	backup_br->type = br->type == SECTOR0_CURRENT ? SECTOR0_CURRENT
+						      : BACKUP;
+	br->type = CHANGED;
+	set_type(bc, &backup_br, backup_br->type);
+
+	/*
+	 * Add the log entry after marking the backup sector, since adding a log
+	 * can cause another backup
+	 */
+	ret = add_log_entry(bc, log_source, log_dest, log_size, checksum);
+	if (ret) {
+		br->type = original_type;
+		return ret;
+	}
+
+	/* Now it is safe to mark this backup successful */
+	if (original_type == SECTOR0_CURRENT)
+		bc->log_sector->sector0 = sector0;
+
+	set_type(bc, &br, br->type);
+	return ret;
+}
+
+static int prepare_free_range(struct bow_context *bc, struct bow_range *br,
+			      struct bvec_iter *bi_iter)
+{
+	int ret;
+
+	ret = split_range(bc, &br, bi_iter);
+	if (ret)
+		return ret;
+	set_type(bc, &br, CHANGED);
+	return 0;
+}
+
+static int prepare_changed_range(struct bow_context *bc, struct bow_range *br,
+				 struct bvec_iter *bi_iter)
+{
+	/* Nothing to do ... */
+	return 0;
+}
+
+static int prepare_one_range(struct bow_context *bc,
+			     struct bvec_iter *bi_iter)
+{
+	struct bow_range *br = find_first_overlapping_range(&bc->ranges,
+							    bi_iter);
+	switch (br->type) {
+	case CHANGED:
+		return prepare_changed_range(bc, br, bi_iter);
+
+	case TRIMMED:
+		return prepare_free_range(bc, br, bi_iter);
+
+	case UNCHANGED:
+	case BACKUP:
+		return prepare_unchanged_range(bc, br, bi_iter, true);
+
+	/*
+	 * We cannot track the checksum for the active sector0, since it
+	 * may change at any point.
+	 */
+	case SECTOR0_CURRENT:
+		return prepare_unchanged_range(bc, br, bi_iter, false);
+
+	case SECTOR0:	/* Handled in the dm_bow_map */
+	case TOP:	/* Illegal - top is off the end of the device */
+	default:
+		WARN_ON(1);
+		return -EIO;
+	}
+}
+
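+/*
+ * Writes arriving in CHECKPOINT state are deferred to a workqueue so
+ * that backing up the overwritten ranges (which uses dm-bufio and may
+ * block) happens off the bio submission path.
+ */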
+struct write_work {
+	struct work_struct work;
+	struct bow_context *bc;
+	struct bio *bio;
+};
+
+static void bow_write(struct work_struct *work)
+{
+	struct write_work *ww = container_of(work, struct write_work, work);
+	struct bow_context *bc = ww->bc;
+	struct bio *bio = ww->bio;
+	struct bvec_iter bi_iter = bio->bi_iter;
+	int ret = 0;
+
+	kfree(ww);
+
+	mutex_lock(&bc->ranges_lock);
+	do {
+		ret = prepare_one_range(bc, &bi_iter);
+		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+		bi_iter.bi_size = bio->bi_iter.bi_size
+			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+			  * SECTOR_SIZE;
+	} while (!ret && bi_iter.bi_size);
+
+	mutex_unlock(&bc->ranges_lock);
+
+	if (!ret) {
+		bio->bi_bdev = bc->dev->bdev;
+		submit_bio(bio);
+	} else {
+		DMERR("Write failure with error %d", -ret);
+		bio->bi_error = ret;
+		bio_endio(bio);
+	}
+}
+
+static int queue_write(struct bow_context *bc, struct bio *bio)
+{
+	struct write_work *ww = kmalloc(sizeof(*ww), GFP_NOIO | __GFP_NORETRY
+					| __GFP_NOMEMALLOC | __GFP_NOWARN);
+	if (!ww) {
+		DMERR("Failed to allocate write_work");
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&ww->work, bow_write);
+	ww->bc = bc;
+	ww->bio = bio;
+	queue_work(bc->workqueue, &ww->work);
+	return DM_MAPIO_SUBMITTED;
+}
+
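+/*
+ * Sector 0 of the backing device holds the log, so client I/O to sector
+ * 0 is redirected to the SECTOR0_CURRENT range; larger bios are split
+ * first so only the first block is remapped.
+ */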
+static int handle_sector0(struct bow_context *bc, struct bio *bio)
+{
+	int ret = DM_MAPIO_REMAPPED;
+
+	if (bio->bi_iter.bi_size > bc->block_size) {
+		struct bio * split = bio_split(bio,
+					       bc->block_size >> SECTOR_SHIFT,
+					       GFP_NOIO,
+					       fs_bio_set);
+		if (!split) {
+			DMERR("Failed to split bio");
+			bio->bi_error = -ENOMEM;
+			bio_endio(bio);
+			return DM_MAPIO_SUBMITTED;
+		}
+
+		bio_chain(split, bio);
+		split->bi_iter.bi_sector = bc->log_sector->sector0;
+		split->bi_bdev = bc->dev->bdev;
+		submit_bio(split);
+
+		if (bio_data_dir(bio) == WRITE)
+			ret = queue_write(bc, bio);
+	} else {
+		bio->bi_iter.bi_sector = bc->log_sector->sector0;
+	}
+
+	return ret;
+}
+
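+/*
+ * In TRIM state a discard only marks the overlapping ranges TRIMMED in
+ * the tree; nothing is forwarded to the backing device.
+ */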
+static int add_trim(struct bow_context *bc, struct bio *bio)
+{
+	struct bow_range *br;
+	struct bvec_iter bi_iter = bio->bi_iter;
+
+	DMDEBUG("add_trim: %llu, %u",
+		(unsigned long long)bio->bi_iter.bi_sector,
+		bio->bi_iter.bi_size);
+
+	do {
+		br = find_first_overlapping_range(&bc->ranges, &bi_iter);
+
+		switch (br->type) {
+		case UNCHANGED:
+			if (!split_range(bc, &br, &bi_iter))
+				set_type(bc, &br, TRIMMED);
+			break;
+
+		case TRIMMED:
+			/* Nothing to do */
+			break;
+
+		default:
+			/* No other case is legal in TRIM state */
+			WARN_ON(true);
+			break;
+		}
+
+		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+		bi_iter.bi_size = bio->bi_iter.bi_size
+			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+			  * SECTOR_SIZE;
+
+	} while (bi_iter.bi_size);
+
+	bio_endio(bio);
+	return DM_MAPIO_SUBMITTED;
+}
+
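+/*
+ * A write in TRIM state means the blocks are live again: revert any
+ * overlapping TRIMMED ranges to UNCHANGED before passing the write on.
+ */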
+static int remove_trim(struct bow_context *bc, struct bio *bio)
+{
+	struct bow_range *br;
+	struct bvec_iter bi_iter = bio->bi_iter;
+
+	DMDEBUG("remove_trim: %llu, %u",
+		(unsigned long long)bio->bi_iter.bi_sector,
+		bio->bi_iter.bi_size);
+
+	do {
+		br = find_first_overlapping_range(&bc->ranges, &bi_iter);
+
+		switch (br->type) {
+		case UNCHANGED:
+			/* Nothing to do */
+			break;
+
+		case TRIMMED:
+			if (!split_range(bc, &br, &bi_iter))
+				set_type(bc, &br, UNCHANGED);
+			break;
+
+		default:
+			/* No other case is legal in TRIM state */
+			WARN_ON(true);
+			break;
+		}
+
+		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+		bi_iter.bi_size = bio->bi_iter.bi_size
+			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+			  * SECTOR_SIZE;
+
+	} while (bi_iter.bi_size);
+
+	return DM_MAPIO_REMAPPED;
+}
+
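+/*
+ * If the backing device had no discard support, the ctr advertised fake
+ * discard limits so trims still reach this target; since the device
+ * cannot service them, they must be failed here rather than forwarded.
+ */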
+int remap_unless_illegal_trim(struct bow_context *bc, struct bio *bio)
+{
+	if (!bc->forward_trims && bio_op(bio) == REQ_OP_DISCARD) {
+		bio->bi_error = -EINVAL;
+		bio_endio(bio);
+		return DM_MAPIO_SUBMITTED;
+	} else {
+		bio->bi_bdev = bc->dev->bdev;
+		return DM_MAPIO_REMAPPED;
+	}
+}
+
+/****** dm interface ******/
+
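+/*
+ * Dispatch: COMMITTED passes everything through; TRIM tracks discards
+ * and writes in the range tree; CHECKPOINT backs data up before letting
+ * writes proceed, with sector 0 handled specially.
+ */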
+static int dm_bow_map(struct dm_target *ti, struct bio *bio)
+{
+	int ret = DM_MAPIO_REMAPPED;
+	struct bow_context *bc = ti->private;
+
+	if (likely(bc->state.counter == COMMITTED))
+		return remap_unless_illegal_trim(bc, bio);
+
+	if (bio_data_dir(bio) == READ && bio->bi_iter.bi_sector != 0)
+		return remap_unless_illegal_trim(bc, bio);
+
+	if (atomic_read(&bc->state) != COMMITTED) {
+		enum state state;
+
+		mutex_lock(&bc->ranges_lock);
+		state = atomic_read(&bc->state);
+		if (state == TRIM) {
+			if (bio_op(bio) == REQ_OP_DISCARD)
+				ret = add_trim(bc, bio);
+			else if (bio_data_dir(bio) == WRITE)
+				ret = remove_trim(bc, bio);
+			else
+				/* pass-through */;
+		} else if (state == CHECKPOINT) {
+			if (bio->bi_iter.bi_sector == 0)
+				ret = handle_sector0(bc, bio);
+			else if (bio_data_dir(bio) == WRITE)
+				ret = queue_write(bc, bio);
+			else
+				/* pass-through */;
+		} else {
+			/* pass-through */
+		}
+		mutex_unlock(&bc->ranges_lock);
+	}
+
+	if (ret == DM_MAPIO_REMAPPED)
+		return remap_unless_illegal_trim(bc, bio);
+
+	return ret;
+}
+
+static void dm_bow_tablestatus(struct dm_target *ti, char *result,
+			       unsigned int maxlen)
+{
+	char *end = result + maxlen;
+	struct bow_context *bc = ti->private;
+	struct rb_node *i;
+	int trimmed_list_length = 0;
+	int trimmed_range_count = 0;
+	struct bow_range *br;
+
+	if (maxlen == 0)
+		return;
+	result[0] = 0;
+
+	list_for_each_entry(br, &bc->trimmed_list, trimmed_list)
+		if (br->type == TRIMMED) {
+			++trimmed_list_length;
+		} else {
+			scnprintf(result, end - result,
+				  "ERROR: non-trimmed entry in trimmed_list");
+			return;
+		}
+
+	if (!rb_first(&bc->ranges)) {
+		scnprintf(result, end - result, "ERROR: Empty ranges");
+		return;
+	}
+
+	if (container_of(rb_first(&bc->ranges), struct bow_range, node)
+	    ->sector) {
+		scnprintf(result, end - result,
+			 "ERROR: First range does not start at sector 0");
+		return;
+	}
+
+	for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
+		struct bow_range *br = container_of(i, struct bow_range, node);
+
+		result += scnprintf(result, end - result, "%s: %llu",
+				    readable_type[br->type],
+				    (unsigned long long)br->sector);
+		if (result >= end)
+			return;
+
+		result += scnprintf(result, end - result, "\n");
+		if (result >= end)
+			return;
+
+		if (br->type == TRIMMED)
+			++trimmed_range_count;
+
+		if (br->type == TOP) {
+			if (br->sector != ti->len) {
+				scnprintf(result, end - result,
+					 "\nERROR: Top sector is incorrect");
+			}
+
+			if (&br->node != rb_last(&bc->ranges)) {
+				scnprintf(result, end - result,
+					  "\nERROR: Top sector is not last");
+			}
+
+			break;
+		}
+
+		if (!rb_next(i)) {
+			scnprintf(result, end - result,
+				  "\nERROR: Last range not of type TOP");
+			return;
+		}
+
+		if (br->sector > range_top(br)) {
+			scnprintf(result, end - result,
+				  "\nERROR: sectors out of order");
+			return;
+		}
+	}
+
+	if (trimmed_range_count != trimmed_list_length)
+		scnprintf(result, end - result,
+			  "\nERROR: not all trimmed ranges in trimmed list");
+}
+
+static void dm_bow_status(struct dm_target *ti, status_type_t type,
+			  unsigned int status_flags, char *result,
+			  unsigned int maxlen)
+{
+	switch (type) {
+	case STATUSTYPE_INFO:
+		if (maxlen)
+			result[0] = 0;
+		break;
+
+	case STATUSTYPE_TABLE:
+		dm_bow_tablestatus(ti, result, maxlen);
+		break;
+	}
+}
+
+int dm_bow_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+			 fmode_t *mode)
+{
+	struct bow_context *bc = ti->private;
+	struct dm_dev *dev = bc->dev;
+
+	*bdev = dev->bdev;
+	/* Only pass ioctls through if the device sizes match exactly. */
+	return ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static int dm_bow_iterate_devices(struct dm_target *ti,
+				  iterate_devices_callout_fn fn, void *data)
+{
+	struct bow_context *bc = ti->private;
+
+	return fn(ti, bc->dev, 0, ti->len, data);
+}
+
+static struct target_type bow_target = {
+	.name   = "bow",
+	.version = {1, 1, 1},
+	.module = THIS_MODULE,
+	.ctr    = dm_bow_ctr,
+	.dtr    = dm_bow_dtr,
+	.map    = dm_bow_map,
+	.status = dm_bow_status,
+	.prepare_ioctl  = dm_bow_prepare_ioctl,
+	.iterate_devices = dm_bow_iterate_devices,
+};
+
+int __init dm_bow_init(void)
+{
+	int r = dm_register_target(&bow_target);
+
+	if (r < 0)
+		DMERR("registering bow failed %d", r);
+	return r;
+}
+
+void dm_bow_exit(void)
+{
+	dm_unregister_target(&bow_target);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(dm_bow_init);
+module_exit(dm_bow_exit);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index cc70871..3f01c52 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -222,7 +222,8 @@
 {
 	struct delay_c *dc = ti->private;
 
-	destroy_workqueue(dc->kdelayd_wq);
+	if (dc->kdelayd_wq)
+		destroy_workqueue(dc->kdelayd_wq);
 
 	dm_put_device(ti, dc->dev_read);
 
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 8ca97b2..b8a935a 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3295,6 +3295,13 @@
 	as.argc = argc;
 	as.argv = argv;
 
+	/* make sure metadata and data are different devices */
+	if (!strcmp(argv[0], argv[1])) {
+		ti->error = "Error setting metadata or data device";
+		r = -EINVAL;
+		goto out_unlock;
+	}
+
 	/*
 	 * Set default pool features.
 	 */
@@ -4177,6 +4184,12 @@
 	tc->sort_bio_list = RB_ROOT;
 
 	if (argc == 3) {
+		if (!strcmp(argv[0], argv[2])) {
+			ti->error = "Error setting origin device";
+			r = -EINVAL;
+			goto bad_origin_dev;
+		}
+
 		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
 		if (r) {
 			ti->error = "Error opening origin device";
diff --git a/drivers/md/md.c b/drivers/md/md.c
index bf2d2df..527ba28 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2694,8 +2694,10 @@
 			err = 0;
 		}
 	} else if (cmd_match(buf, "re-add")) {
-		if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
-			rdev->saved_raid_disk >= 0) {
+		if (!rdev->mddev->pers)
+			err = -EINVAL;
+		else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
+				rdev->saved_raid_disk >= 0) {
 			/* clear_bit is performed _after_ all the devices
 			 * have their local Faulty bit cleared. If any writes
 			 * happen in the meantime in the local node, they
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f1ad80b..3d2cd54 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3798,6 +3798,8 @@
 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
 							"reshape");
+		if (!mddev->sync_thread)
+			goto out_free_conf;
 	}
 
 	return 0;
@@ -4489,7 +4491,6 @@
 	atomic_inc(&r10_bio->remaining);
 	read_bio->bi_next = NULL;
 	generic_make_request(read_bio);
-	sector_nr += nr_sectors;
 	sectors_done += nr_sectors;
 	if (sector_nr <= last)
 		goto read_more;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d15e29d..7196bc3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3878,7 +3878,7 @@
 		/* now write out any block on a failed drive,
 		 * or P or Q if they were recomputed
 		 */
-		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
+		dev = NULL;
 		if (s->failed == 2) {
 			dev = &sh->dev[s->failed_num[1]];
 			s->locked++;
@@ -3903,6 +3903,14 @@
 			set_bit(R5_LOCKED, &dev->flags);
 			set_bit(R5_Wantwrite, &dev->flags);
 		}
+		if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags),
+			      "%s: disk%td not up to date\n",
+			      mdname(conf->mddev),
+			      dev - (struct r5dev *) &sh->dev)) {
+			clear_bit(R5_LOCKED, &dev->flags);
+			clear_bit(R5_Wantwrite, &dev->flags);
+			s->locked--;
+		}
 		clear_bit(STRIPE_DEGRADED, &sh->state);
 
 		set_bit(STRIPE_INSYNC, &sh->state);
@@ -6977,6 +6985,8 @@
 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
 							"reshape");
+		if (!mddev->sync_thread)
+			goto abort;
 	}
 
 	/* Ok, everything is just fine now */
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 72e71b7..a614587 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -974,6 +974,8 @@
 	mt9m111->rect.top	= MT9M111_MIN_DARK_ROWS;
 	mt9m111->rect.width	= MT9M111_MAX_WIDTH;
 	mt9m111->rect.height	= MT9M111_MAX_HEIGHT;
+	mt9m111->width		= mt9m111->rect.width;
+	mt9m111->height		= mt9m111->rect.height;
 	mt9m111->fmt		= &mt9m111_colour_fmts[0];
 	mt9m111->lastpage	= -1;
 	mutex_init(&mt9m111->power_lock);
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 56cfb5c..0a72228 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -155,10 +155,10 @@
 #define REG_GFIX	0x69	/* Fix gain control */
 
 #define REG_DBLV	0x6b	/* PLL control an debugging */
-#define   DBLV_BYPASS	  0x00	  /* Bypass PLL */
-#define   DBLV_X4	  0x01	  /* clock x4 */
-#define   DBLV_X6	  0x10	  /* clock x6 */
-#define   DBLV_X8	  0x11	  /* clock x8 */
+#define   DBLV_BYPASS	  0x0a	  /* Bypass PLL */
+#define   DBLV_X4	  0x4a	  /* clock x4 */
+#define   DBLV_X6	  0x8a	  /* clock x6 */
+#define   DBLV_X8	  0xca	  /* clock x8 */
 
 #define REG_REG76	0x76	/* OV's name */
 #define   R76_BLKPCOR	  0x80	  /* Black pixel correction enable */
@@ -833,7 +833,7 @@
 	if (ret < 0)
 		return ret;
 
-	return ov7670_write(sd, REG_DBLV, DBLV_X4);
+	return 0;
 }
 
 static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd,
@@ -1578,11 +1578,7 @@
 		if (config->clock_speed)
 			info->clock_speed = config->clock_speed;
 
-		/*
-		 * It should be allowed for ov7670 too when it is migrated to
-		 * the new frame rate formula.
-		 */
-		if (config->pll_bypass && id->driver_data != MODEL_OV7670)
+		if (config->pll_bypass)
 			info->pll_bypass = true;
 
 		if (config->pclk_hb_disable)
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index 8f85910..e21b7e1 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -844,6 +844,8 @@
 	if (ret < 0)
 		return ret;
 
+	msleep(20);
+
 	/*
 	 * check and show product ID and manufacturer ID
 	 */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 026e2c7..d119b00 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -4217,6 +4217,12 @@
 		return -EFAULT;
 	}
 
+	/* Make sure num_out_res is the same as was allocated */
+	if (ctx_data->icp_dev_acquire_info->num_out_res !=
+		icp_dev_acquire_info.num_out_res) {
+		CAM_ERR(CAM_ICP, "num_out_res got changed");
+		return -EFAULT;
+	}
 	CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x %u",
 		ctx_data->icp_dev_acquire_info->dev_type,
 		ctx_data->icp_dev_acquire_info->in_res.format,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 4dce293..0b6b7e9 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -439,17 +439,29 @@
 	else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT)
 		validate_size = sizeof(struct cam_cmd_unconditional_wait);
 
-	if (remain_buf_len < validate_size) {
+	if (remain_buf_len < validate_size ||
+	    *num_map >= MSM_EEPROM_MAX_MEM_MAP_CNT) {
 		CAM_ERR(CAM_EEPROM, "not enough buffer");
 		return -EINVAL;
 	}
 	switch (cmm_hdr->cmd_type) {
 	case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
 		i2c_random_wr = (struct cam_cmd_i2c_random_wr *)cmd_buf;
+
+		if (i2c_random_wr->header.count == 0 ||
+		    i2c_random_wr->header.count >= MSM_EEPROM_MAX_MEM_MAP_CNT ||
+		    (size_t)*num_map > U16_MAX - i2c_random_wr->header.count) {
+			CAM_ERR(CAM_EEPROM, "OOB Error");
+			return -EINVAL;
+		}
 		cmd_length_in_bytes   = sizeof(struct cam_cmd_i2c_random_wr) +
 			((i2c_random_wr->header.count - 1) *
 			sizeof(struct i2c_random_wr_payload));
 
+		if (cmd_length_in_bytes > remain_buf_len) {
+			CAM_ERR(CAM_EEPROM, "Not enough buffer remaining");
+			return -EINVAL;
+		}
 		for (cnt = 0; cnt < (i2c_random_wr->header.count);
 			cnt++) {
 			map[*num_map + cnt].page.addr =
@@ -472,6 +484,11 @@
 		i2c_cont_rd = (struct cam_cmd_i2c_continuous_rd *)cmd_buf;
 		cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_continuous_rd);
 
+		if (i2c_cont_rd->header.count >= U32_MAX - data->num_data) {
+			CAM_ERR(CAM_EEPROM,
+				"int overflow on eeprom memory block");
+			return -EINVAL;
+		}
 		map[*num_map].mem.addr = i2c_cont_rd->reg_addr;
 		map[*num_map].mem.addr_type = i2c_cont_rd->header.addr_type;
 		map[*num_map].mem.data_type = i2c_cont_rd->header.data_type;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ir_led/cam_ir_led_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ir_led/cam_ir_led_core.c
index 144578a..7cac162 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ir_led/cam_ir_led_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ir_led/cam_ir_led_core.c
@@ -47,7 +47,7 @@
 static int cam_ir_led_set_intensity(struct cam_ir_led_ctrl *ictrl,
 			uint32_t ir_led_intensity)
 {
-	CAM_INFO(CAM_IR_LED, "ir_led_intensity=%d", ir_led_intensity);
+	CAM_DBG(CAM_IR_LED, "ir_led_intensity=%d", ir_led_intensity);
 	switch (ir_led_intensity) {
 	case IRLED_INTENSITY_OFF:
 		gpio_direction_output(
@@ -165,17 +165,17 @@
 
 	switch (csl_packet->header.op_code & 0xFFFFFF) {
 	case CAM_IR_LED_PACKET_OPCODE_ON:
-		CAM_INFO(CAM_IR_LED, ":CAM_IR_LED_PACKET_OPCODE_ON");
+		CAM_DBG(CAM_IR_LED, ":CAM_IR_LED_PACKET_OPCODE_ON");
 		cam_ir_cut_on(ictrl);
 		cam_ir_led_set_intensity(ictrl,
 				cam_ir_led_info->ir_led_intensity);
 		break;
 	case CAM_IR_LED_PACKET_OPCODE_OFF:
-		CAM_INFO(CAM_IR_LED, "CAM_IR_LED_PACKET_OPCODE_OFF");
+		CAM_DBG(CAM_IR_LED, "CAM_IR_LED_PACKET_OPCODE_OFF");
 		cam_ir_cut_off(ictrl);
 		break;
 	case CAM_PKT_NOP_OPCODE:
-		CAM_INFO(CAM_IR_LED, "CAM_IR_LED: CAM_PKT_NOP_OPCODE");
+		CAM_DBG(CAM_IR_LED, "CAM_IR_LED: CAM_PKT_NOP_OPCODE");
 		break;
 	default:
 		CAM_ERR(CAM_IR_LED, "Wrong Opcode : %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ir_led/cam_ir_led_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ir_led/cam_ir_led_dev.c
index 3ba7745..d3b28ea 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ir_led/cam_ir_led_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ir_led/cam_ir_led_dev.c
@@ -22,7 +22,6 @@
 	int rc = 0;
 	struct cam_control *cmd = (struct cam_control *)arg;
 
-		CAM_ERR(CAM_IR_LED, "%s, IN", __func__);
 	if (!ictrl || !arg) {
 		CAM_ERR(CAM_IR_LED, "ictrl/arg is NULL with arg:%pK ictrl%pK",
 			ictrl, arg);
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
index 6f2cfcf..634e34d 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1473,6 +1473,7 @@
 		.name = MSM_FD_DRV_NAME,
 		.owner = THIS_MODULE,
 		.of_match_table = msm_fd_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index fa55d64..4187644 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -415,6 +415,12 @@
 
 #define MSM_VFE_REQUESTQ_SIZE 8
 
+struct msm_isp_pending_buf_info {
+	uint32_t is_buf_done_pending;
+	struct msm_isp_buffer *buf;
+	uint32_t frame_id;
+};
+
 struct msm_vfe_axi_stream {
 	uint32_t frame_id;
 	enum msm_vfe_axi_state state;
@@ -471,6 +477,7 @@
 	uint32_t vfe_mask;
 	uint32_t composite_irq[MSM_ISP_COMP_IRQ_MAX];
 	int lpm_mode;
+	struct msm_isp_pending_buf_info pending_buf_info;
 };
 
 struct msm_vfe_axi_composite_info {
@@ -846,6 +853,9 @@
 	/* total bandwidth per vfe */
 	uint64_t total_bandwidth;
 	struct isp_kstate *isp_page;
+
+	/* irq info */
+	uint32_t irq_sof_id;
 };
 
 struct vfe_parent_device {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 75d09a3..45611d4 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -712,8 +712,10 @@
 {
 	if (irq_status0 & BIT(3))
 		vfe_dev->axi_data.src_info[VFE_PIX_0].accept_frame = false;
-	if (irq_status0 & BIT(0))
+	if (irq_status0 & BIT(0)) {
 		vfe_dev->axi_data.src_info[VFE_PIX_0].accept_frame = true;
+		vfe_dev->irq_sof_id++;
+	}
 }
 
 void msm_vfe47_reg_update(struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 4d4fd78..fbf16d1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -16,9 +16,11 @@
 #include "msm_isp_stats_util.h"
 #include "msm_isp_axi_util.h"
 #include "msm_isp48.h"
+#include "trace/events/msm_cam.h"
 
 #define HANDLE_TO_IDX(handle) (handle & 0xFF)
 #define ISP_SOF_DEBUG_COUNT 0
+#define OTHER_VFE(vfe_id) (vfe_id == ISP_VFE0 ? ISP_VFE1 : ISP_VFE0)
 
 #ifdef CONFIG_MSM_AVTIMER
 static struct avtimer_fptr_t avtimer_func;
@@ -30,6 +32,13 @@
 			struct msm_vfe_axi_stream *stream_info,
 			struct msm_isp_timestamp *ts);
 
+static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info, struct msm_isp_buffer *buf,
+	struct timeval *time_stamp, uint32_t frame_id);
+static void msm_isp_free_pending_buffer(
+	struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream *stream_info,
+	struct msm_isp_timestamp *ts);
 static int msm_isp_update_stream_bandwidth(
 		struct msm_vfe_axi_stream *stream_info, int enable);
 
@@ -621,7 +630,8 @@
 		drop_reconfig != 1)
 		stream_info->current_framedrop_period =
 			MSM_VFE_STREAM_STOP_PERIOD;
-
+	if (stream_info->controllable_output && drop_reconfig == 1)
+		stream_info->current_framedrop_period = 1;
 	/*
 	 * re-configure the period pattern, only if it's not already
 	 * set to what we want
@@ -669,12 +679,29 @@
 		case MSM_ISP_COMP_IRQ_REG_UPD:
 			stream_info->activated_framedrop_period =
 				stream_info->requested_framedrop_period;
+			/* Free pending buffers that were held back due to a
+			 * delayed RUP from userspace, to avoid a page fault
+			 */
+			msm_isp_free_pending_buffer(vfe_dev, stream_info, ts);
 			__msm_isp_axi_stream_update(stream_info, ts);
 			break;
 		case MSM_ISP_COMP_IRQ_EPOCH:
-			if (stream_info->state == ACTIVE)
+			if (stream_info->state == ACTIVE) {
+				struct vfe_device *temp = NULL;
+				struct msm_vfe_common_dev_data *c_data;
+				uint32_t drop_reconfig =
+					vfe_dev->isp_page->drop_reconfig;
+				if (stream_info->num_isp > 1 &&
+					vfe_dev->pdev->id == ISP_VFE0) {
+					c_data = vfe_dev->common_data;
+					temp = c_data->dual_vfe_res->vfe_dev[
+						ISP_VFE1];
+					drop_reconfig =
+						temp->isp_page->drop_reconfig;
+				}
 				msm_isp_update_framedrop_reg(stream_info,
-					vfe_dev->isp_page->drop_reconfig);
+					drop_reconfig);
+			}
 			break;
 		default:
 			WARN(1, "Invalid irq %d\n", irq);
@@ -1050,8 +1077,12 @@
 			vfe_dev->isp_raw2_debug++;
 		}
 
-		ISP_DBG("%s: vfe %d frame_src %d\n", __func__,
-			vfe_dev->pdev->id, frame_src);
+		ISP_DBG("%s: vfe %d frame_src %d frameid %d\n", __func__,
+			vfe_dev->pdev->id, frame_src,
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+		trace_msm_cam_isp_status_dump("SOFNOTIFY:", vfe_dev->pdev->id,
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+			0, 0);
 
 		/*
 		 * Cannot support dual_cam and framedrop same time in union.
@@ -1567,6 +1598,40 @@
 	}
 }
 
+static void msm_isp_free_pending_buffer(
+			struct vfe_device *vfe_dev,
+			struct msm_vfe_axi_stream *stream_info,
+			struct msm_isp_timestamp *ts)
+{
+	struct timeval *time_stamp;
+	struct msm_isp_buffer *done_buf = NULL;
+	uint32_t frame_id;
+	int rc;
+
+	if (!stream_info->controllable_output ||
+		!stream_info->pending_buf_info.is_buf_done_pending) {
+		return;
+	}
+
+	if (vfe_dev->vt_enable) {
+		msm_isp_get_avtimer_ts(ts);
+		time_stamp = &ts->vt_time;
+	} else {
+		time_stamp = &ts->buf_time;
+	}
+
+	done_buf = stream_info->pending_buf_info.buf;
+	frame_id = stream_info->pending_buf_info.frame_id;
+	if (done_buf) {
+		rc = msm_isp_process_done_buf(vfe_dev, stream_info,
+			done_buf, time_stamp, frame_id);
+		if (rc == 0) {
+			stream_info->pending_buf_info.buf = NULL;
+			stream_info->pending_buf_info.is_buf_done_pending = 0;
+		}
+	}
+}
+
 static void __msm_isp_axi_stream_update(
 			struct msm_vfe_axi_stream *stream_info,
 			struct msm_isp_timestamp *ts)
@@ -2118,7 +2183,6 @@
 	uint32_t buf_src;
 	uint8_t drop_frame = 0;
 	struct msm_isp_bufq *bufq = NULL;
-
 	memset(&buf_event, 0, sizeof(buf_event));
 
 	if (stream_idx >= VFE_AXI_SRC_MAX) {
@@ -2448,6 +2512,7 @@
 			continue;
 		/* activate the input since it is deactivated */
 		axi_data->src_info[i].frame_id = 0;
+		vfe_dev->irq_sof_id = 0;
 		if (axi_data->src_info[i].input_mux != EXTERNAL_READ)
 			axi_data->src_info[i].active = 1;
 		if (i >= VFE_RAW_0 && sync_frame_id_src) {
@@ -2792,6 +2857,7 @@
 			axi_data->src_info[SRC_TO_INTF(stream_info->
 				stream_src)].frame_id =
 				reset_cmd->frame_id;
+			temp_vfe_dev->irq_sof_id = reset_cmd->frame_id;
 		}
 		msm_isp_reset_burst_count_and_frame_drop(
 			vfe_dev, stream_info);
@@ -3064,6 +3130,12 @@
 		msm_isp_cfg_stream_scratch(stream_info, VFE_PING_FLAG);
 		msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG);
 		stream_info->undelivered_request_cnt = 0;
+		if (stream_info->controllable_output &&
+			stream_info->pending_buf_info.is_buf_done_pending) {
+			msm_isp_free_pending_buffer(vfe_dev, stream_info,
+				&timestamp);
+			stream_info->pending_buf_info.is_buf_done_pending = 0;
+		}
 		for (k = 0; k < stream_info->num_isp; k++) {
 			vfe_dev = stream_info->vfe_dev[k];
 			if (stream_info->num_planes > 1)
@@ -3226,7 +3298,6 @@
 			mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
 			goto error;
 		}
-
 		msm_isp_calculate_bandwidth(stream_info);
 		for (k = 0; k < stream_info->num_isp; k++) {
 			msm_isp_get_stream_wm_mask(stream_info->vfe_dev[k],
@@ -3573,13 +3644,17 @@
 	 */
 	if (vfe_dev->axi_data.src_info[frame_src].active &&
 		frame_src == VFE_PIX_0 &&
-		vfe_dev->axi_data.src_info[frame_src].accept_frame == false) {
+		vfe_dev->axi_data.src_info[frame_src].accept_frame == false &&
+		(stream_info->undelivered_request_cnt <=
+			MAX_BUFFERS_IN_HW)
+		) {
 		pr_debug("%s:%d invalid time to request frame %d\n",
 			__func__, __LINE__, frame_id);
 		vfe_dev->isp_page->drop_reconfig = 1;
 	} else if ((vfe_dev->axi_data.src_info[frame_src].active) &&
-			(frame_id ==
-			vfe_dev->axi_data.src_info[frame_src].frame_id) &&
+			((frame_id ==
+			vfe_dev->axi_data.src_info[frame_src].frame_id) ||
+			(frame_id == vfe_dev->irq_sof_id)) &&
 			(stream_info->undelivered_request_cnt <=
 				MAX_BUFFERS_IN_HW)) {
 		vfe_dev->isp_page->drop_reconfig = 1;
@@ -4225,6 +4300,8 @@
 	struct timeval *time_stamp;
 	uint32_t frame_id, buf_index = -1;
 	int vfe_idx;
+	struct vfe_device *temp_dev;
+	int other_vfe_id;
 
 	if (!ts) {
 		pr_err("%s: Error! Invalid argument\n", __func__);
@@ -4322,15 +4399,42 @@
 			ISP_DBG("%s: Error configuring ping_pong\n",
 				__func__);
 	} else if (done_buf && (done_buf->is_drop_reconfig != 1)) {
+		int32_t frame_id_diff;
+		/* irq_sof_id should always be >= the tasklet SOF id.
+		 * In the dual-camera use case irq_sof_id can fall behind,
+		 * because the software frame-id sync logic may update the
+		 * slave frame id at the epoch event, so catch up here if
+		 * irq_sof_id is behind the tasklet SOF id.
+		 */
+		if (vfe_dev->irq_sof_id < frame_id)
+			vfe_dev->irq_sof_id = frame_id;
+
+		frame_id_diff = vfe_dev->irq_sof_id - frame_id;
+		if (stream_info->controllable_output && frame_id_diff > 1) {
+			/* scheduling problem need to do recovery */
+			spin_unlock_irqrestore(&stream_info->lock, flags);
+			msm_isp_halt_send_error(vfe_dev,
+				ISP_EVENT_PING_PONG_MISMATCH);
+			return;
+		}
 		msm_isp_cfg_stream_scratch(stream_info, pingpong_status);
 	}
-
 	if (!done_buf) {
 		if (stream_info->buf_divert) {
 			vfe_dev->error_info.stream_framedrop_count[
 				stream_info->bufq_handle[
 				VFE_BUF_QUEUE_DEFAULT] & 0xFF]++;
 			vfe_dev->error_info.framedrop_flag = 1;
+			if (vfe_dev->is_split) {
+				other_vfe_id = OTHER_VFE(vfe_dev->pdev->id);
+				temp_dev =
+				vfe_dev->common_data->dual_vfe_res->vfe_dev[
+					other_vfe_id];
+				temp_dev->error_info.stream_framedrop_count[
+				stream_info->bufq_handle[
+				VFE_BUF_QUEUE_DEFAULT] & 0xFF]++;
+				temp_dev->error_info.framedrop_flag = 1;
+			}
+
 		}
 		spin_unlock_irqrestore(&stream_info->lock, flags);
 		return;
@@ -4373,11 +4477,28 @@
 	 * then dont issue buf-done for current buffer
 	 */
 		done_buf->is_drop_reconfig = 0;
+		if (!stream_info->buf[pingpong_bit]) {
+			/* same buffer was not reprogrammed, so write scratch */
+			msm_isp_cfg_stream_scratch(stream_info,
+				pingpong_status);
+		}
 		spin_unlock_irqrestore(&stream_info->lock, flags);
 	} else {
+		/* If there is no reg-update from userspace then don't
+		 * free the buffer immediately; delegate it to RegUpdateAck
+		 */
+		if (stream_info->controllable_output &&
+			!(vfe_dev->reg_update_requested &
+				BIT((uint32_t)VFE_PIX_0))) {
+			stream_info->pending_buf_info.is_buf_done_pending = 1;
+			stream_info->pending_buf_info.buf = done_buf;
+			stream_info->pending_buf_info.frame_id = frame_id;
+		}
 		spin_unlock_irqrestore(&stream_info->lock, flags);
-		msm_isp_process_done_buf(vfe_dev, stream_info,
-			done_buf, time_stamp, frame_id);
+		if (stream_info->pending_buf_info.is_buf_done_pending != 1) {
+			msm_isp_process_done_buf(vfe_dev, stream_info,
+				done_buf, time_stamp, frame_id);
+		}
 	}
 }
 
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 431609f..47ac471 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2125,6 +2125,9 @@
 		return;
 	}
 	atomic_add(1, &vfe_dev->irq_cnt);
+	trace_msm_cam_isp_status_dump("VFE_IRQ:", vfe_dev->pdev->id,
+		vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+		irq_status0, irq_status1);
 	queue_cmd->vfeInterruptStatus0 = irq_status0;
 	queue_cmd->vfeInterruptStatus1 = irq_status1;
 	queue_cmd->vfe_pingpong_status = ping_pong_status;
@@ -2224,6 +2227,9 @@
 		atomic_sub(1, &vfe_dev->irq_cnt);
 		msm_isp_prepare_tasklet_debug_info(vfe_dev,
 			irq_status0, irq_status1, ts);
+		trace_msm_cam_isp_status_dump("VFE_TASKLET:", vfe_dev->pdev->id,
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+			irq_status0, irq_status1);
 		irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
 		irq_ops->process_reset_irq(vfe_dev,
 			irq_status0, irq_status1);
@@ -2327,7 +2333,7 @@
 	vfe_dev->isp_raw0_debug = 0;
 	vfe_dev->isp_raw1_debug = 0;
 	vfe_dev->isp_raw2_debug = 0;
-
+	vfe_dev->irq_sof_id = 0;
 	if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
 		pr_err("%s: init hardware failed\n", __func__);
 		vfe_dev->vfe_open_cnt--;
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c
index 0811efb..a366b01 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -297,6 +297,7 @@
 		.name = "msm_jpeg",
 		.owner = THIS_MODULE,
 		.of_match_table = msm_jpeg_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c
index ea82832..0592526 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -907,7 +907,7 @@
 
 void msm_jpeg_io_dump(void *base, int size)
 {
-	char line_str[128];
+	char line_str[140];
 	void __iomem *addr = (void __iomem *)base;
 	int i;
 	void __iomem *p = addr;
@@ -919,10 +919,10 @@
 
 	JPEG_DBG_HIGH("%s:%d] %pK %d", __func__, __LINE__, addr, size);
 	line_str[0] = '\0';
-	for (i = 0; i < size/4; i++) {
+	for (i = 0; i < size; i = i+4) {
 		if (i % 4 == 0) {
 			used = snprintf(line_str + offset,
-				sizeof_line_str - offset, "%pK ", p);
+				sizeof_line_str - offset, "%pK", p+i);
 			if ((used < min_range) ||
 				(offset + used >= sizeof_line_str)) {
 				JPEG_PR_ERR("%s\n", line_str);
@@ -932,9 +932,9 @@
 				offset += used;
 			}
 		}
-		data = msm_camera_io_r(p++);
+		data = msm_camera_io_r(p+i);
 		used = snprintf(line_str + offset,
-			sizeof_line_str - offset, "%08x ", data);
+			sizeof_line_str - offset, " - %08x ", data);
 		if ((used < min_range) ||
 			(offset + used >= sizeof_line_str)) {
 			JPEG_PR_ERR("%s\n", line_str);
@@ -943,11 +943,6 @@
 		} else {
 			offset += used;
 		}
-		if ((i + 1) % 4 == 0) {
-			JPEG_DBG_HIGH("%s\n", line_str);
-			line_str[0] = '\0';
-			offset = 0;
-		}
 	}
 	if (line_str[0] != '\0')
 		JPEG_DBG_HIGH("%s\n", line_str);
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
index dc316b1..bc3c5c6 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -892,8 +892,12 @@
 	void *fh, struct v4l2_buffer *buf)
 {
 	struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+	int ret;
 
-	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+	mutex_lock(&ctx->lock);
+	ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+	mutex_unlock(&ctx->lock);
+	return ret;
 }
 
 /*
@@ -908,13 +912,15 @@
 	struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
 	int ret;
 
-	if (!msm_jpegdma_config_ok(ctx))
+	mutex_lock(&ctx->lock);
+	if (!msm_jpegdma_config_ok(ctx)) {
+		mutex_unlock(&ctx->lock);
 		return -EINVAL;
-
+	}
 	ret = v4l2_m2m_streamon(file, ctx->m2m_ctx, buf_type);
 	if (ret < 0)
 		dev_err(ctx->jdma_device->dev, "Stream on fail\n");
-
+	mutex_unlock(&ctx->lock);
 	return ret;
 }
 
@@ -1488,6 +1494,7 @@
 		.name = MSM_JPEGDMA_DRV_NAME,
 		.owner = THIS_MODULE,
 		.of_match_table = msm_jpegdma_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 85ca275..576d5d6 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1453,15 +1453,6 @@
 		VBIF_CLIENT_CPP, cpp_vbif_error_handler);
 
 	if (cpp_dev->cpp_open_cnt == 1) {
-		rc = cpp_init_hardware(cpp_dev);
-		if (rc < 0) {
-			cpp_dev->cpp_open_cnt--;
-			cpp_dev->cpp_subscribe_list[i].active = 0;
-			cpp_dev->cpp_subscribe_list[i].vfh = NULL;
-			mutex_unlock(&cpp_dev->mutex);
-			return rc;
-		}
-
 		rc = cpp_init_mem(cpp_dev);
 		if (rc < 0) {
 			pr_err("Error: init memory fail\n");
@@ -1472,6 +1463,14 @@
 			return rc;
 		}
 
+		rc = cpp_init_hardware(cpp_dev);
+		if (rc < 0) {
+			cpp_dev->cpp_open_cnt--;
+			cpp_dev->cpp_subscribe_list[i].active = 0;
+			cpp_dev->cpp_subscribe_list[i].vfh = NULL;
+			mutex_unlock(&cpp_dev->mutex);
+			return rc;
+		}
 		cpp_dev->state = CPP_STATE_IDLE;
 
 		CPP_DBG("Invoking msm_ion_client_create()\n");
@@ -1491,6 +1490,8 @@
 {
 	uint32_t i;
 	int rc = -1;
+	int counter = 0;
+	u32 result = 0;
 	struct cpp_device *cpp_dev = NULL;
 	struct msm_device_queue *processing_q = NULL;
 	struct msm_device_queue *eventData_q = NULL;
@@ -1569,6 +1570,61 @@
 			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x88));
 		pr_debug("DEBUG_R1: 0x%x\n",
 			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x8C));
+
+		/* Update bandwidth usage to enable the AXI/AHB clocks,
+		 * which is needed to reset the CPP AXI. Bandwidth is
+		 * set back to zero in cpp_release_hardware.
+		 */
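+		/* Recovery sequence: reset the CPP AXI and poll for its
+		 * done bit, then issue the firmware reset and poll for
+		 * reset done.
+		 */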
+		msm_cpp_update_bandwidth(cpp_dev, 0x1000, 0x1000);
+
+		/* mask IRQ status */
+		msm_camera_io_w(0xB, cpp_dev->cpp_hw_base + 0xC);
+
+		/* clear IRQ status */
+		msm_camera_io_w(0xFFFFF, cpp_dev->cpp_hw_base + 0x14);
+
+		/* MMSS_A_CPP_AXI_CMD = 0x16C, reset 0x1*/
+		msm_camera_io_w(0x1, cpp_dev->cpp_hw_base + 0x16C);
+
+		while (counter < MSM_CPP_POLL_RETRIES) {
+			result = msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10);
+			if (result & 0x2)
+				break;
+			/*
+			 * The usleep range below was chosen experimentally;
+			 * it is the smallest value that works. The sleep is
+			 * needed to give the hardware enough time to update
+			 * the status register.
+			 */
+			usleep_range(200, 250);
+			counter++;
+		}
+
+		pr_debug("CPP AXI done counter %d result 0x%x\n",
+			counter, result);
+
+		/* clear IRQ status */
+		msm_camera_io_w(0xFFFFF, cpp_dev->cpp_hw_base + 0x14);
+		counter = 0;
+		/* MMSS_A_CPP_RST_CMD_0 = 0x8, firmware reset = 0x3DF77 */
+		msm_camera_io_w(0x3DF77, cpp_dev->cpp_hw_base + 0x8);
+
+		while (counter < MSM_CPP_POLL_RETRIES) {
+			result = msm_camera_io_r(cpp_dev->cpp_hw_base + 0x10);
+			if (result & 0x1)
+				break;
+			/*
+			 * The usleep range below was chosen experimentally;
+			 * it is the smallest value that works. The sleep is
+			 * needed to give the hardware enough time to update
+			 * the status register.
+			 */
+			usleep_range(200, 250);
+			counter++;
+		}
+		pr_debug("CPP reset done counter %d result 0x%x\n",
+			counter, result);
+
 		msm_camera_io_w(0x0, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
 		msm_cpp_clear_timer(cpp_dev);
 		cpp_release_hardware(cpp_dev);
@@ -4765,6 +4821,7 @@
 		.name = MSM_CPP_DRV_NAME,
 		.owner = THIS_MODULE,
 		.of_match_table = msm_cpp_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index c045eda..2806132 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1585,6 +1585,13 @@
 	}
 	kfree(a_ctrl->i2c_reg_tbl);
 	a_ctrl->i2c_reg_tbl = NULL;
+	if (a_ctrl->actuator_state == ACT_OPS_ACTIVE) {
+		rc = msm_actuator_power_down(a_ctrl);
+		if (rc < 0) {
+			pr_err("%s:%d Actuator Power down failed\n",
+				__func__, __LINE__);
+		}
+	}
 	a_ctrl->actuator_state = ACT_DISABLE_STATE;
 	mutex_unlock(a_ctrl->actuator_mutex);
 	CDBG("Exit\n");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
index 90b8e1c..b406045 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, 2018-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -51,10 +51,10 @@
 	{0x148, 0xFE},
 	{0x14C, 0x1},
 	{0x154, 0x0},
-	{0x15C, 0x33},
+	{0x15C, 0x23},
 	{0x160, ULPM_WAKE_UP_TIMER_MODE},
-	{0x164, 0x48},
-	{0x168, 0xA0},
+	{0x164, 0x50},
+	{0x168, 0x70},
 	{0x16C, 0x17},
 	{0x170, 0x41},
 	{0x174, 0x41},
diff --git a/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c b/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c
index f4305ea..e8cc920 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -54,8 +54,10 @@
 			return -EINVAL;
 		}
 		for (i = 0; i < eeprom_map->memory_map_size; i++) {
-			if (eeprom_map->mem_settings[i].i2c_operation ==
-				MSM_CAM_READ) {
+			if ((eeprom_map->mem_settings[i].i2c_operation ==
+				MSM_CAM_READ) ||
+				(eeprom_map->mem_settings[i].i2c_operation ==
+				MSM_CAM_READ_LOOP)) {
 				size += eeprom_map->mem_settings[i].reg_data;
 			}
 		}
@@ -325,8 +327,10 @@
 static int eeprom_parse_memory_map(struct msm_eeprom_ctrl_t *e_ctrl,
 	struct msm_eeprom_memory_map_array *eeprom_map_array)
 {
-	int rc =  0, i, j;
+	int rc =  0, i, j, gc;
 	uint8_t *memptr;
+	uint16_t gc_read = 0;
+
 	struct msm_eeprom_mem_map_t *eeprom_map;
 
 	e_ctrl->cal_data.mapdata = NULL;
@@ -406,9 +410,41 @@
 				memptr += eeprom_map->mem_settings[i].reg_data;
 			}
 			break;
+
+			case MSM_CAM_READ_LOOP: {
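+				/* Read the same register reg_data times,
+				 * sleeping between reads, collecting one
+				 * byte per read into the memory map.
+				 */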
+				e_ctrl->i2c_client.addr_type =
+				 eeprom_map->mem_settings[i].addr_type;
+
+				for (gc = 0;
+					gc < eeprom_map->mem_settings[i].
+						reg_data;
+					gc++) {
+					msleep(eeprom_map->mem_settings[i].
+						delay);
+					rc = e_ctrl->i2c_client.i2c_func_tbl->
+						i2c_read(
+						&(e_ctrl->i2c_client),
+						eeprom_map->mem_settings[i].
+						reg_addr,
+						&gc_read,
+						eeprom_map->mem_settings[i].
+						data_type);
+					if (rc < 0) {
+						pr_err("%s: read failed\n",
+							__func__);
+						goto clean_up;
+					}
+					*memptr = (uint8_t)gc_read;
+					memptr++;
+				}
+			}
+			break;
+
 			default:
-				pr_err("%s: %d Invalid i2c operation LC:%d\n",
-					__func__, __LINE__, i);
+				pr_err("%s: %d Invalid i2c operation LC:%d, op: %d\n",
+					__func__, __LINE__, i,
+					eeprom_map->mem_settings[i].
+						i2c_operation);
 				return -EINVAL;
 			}
 		}
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm.h
index ff8be35..178d33d 100644
--- a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm.h
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -251,6 +251,17 @@
 	int32_t refcount;
 };
 
+/**
+ * struct cam_cdm_debugfs_entry : debugfs entry struct
+ *
+ * @dentry                       : entry of debugfs
+ * @dump_register                : flag to dump registers
+ */
+struct cam_cdm_debugfs_entry {
+	struct dentry   *dentry;
+	bool             dump_register;
+};
+
 int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
 	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
 	uint32_t *index);
diff --git a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_hw_core.c
index c8c64b4..af83aba 100644
--- a/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_hw_core.c
+++ b/drivers/media/platform/msm/camera_v3/cam_cdm/cam_cdm_hw_core.c
@@ -38,6 +38,8 @@
 
 static void cam_hw_cdm_work(struct work_struct *work);
 
+static struct cam_cdm_debugfs_entry debugfs_entry;
+
 /* DT match table entry for all CDM variants*/
 static const struct of_device_id msm_cam_hw_cdm_dt_match[] = {
 	{
@@ -69,6 +71,31 @@
 	return rc;
 }
 
+static int cam_hw_cdm_create_debugfs_entry(void)
+{
+	int rc = 0;
+
+	debugfs_entry.dentry = debugfs_create_dir("camera_cdm", NULL);
+	if (!debugfs_entry.dentry)
+		return -ENOMEM;
+
+	if (!debugfs_create_bool("dump_register",
+		0644,
+		debugfs_entry.dentry,
+		&debugfs_entry.dump_register)) {
+		CAM_ERR(CAM_CDM,
+			"failed to create dump_register entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	return rc;
+err:
+	debugfs_remove_recursive(debugfs_entry.dentry);
+	debugfs_entry.dentry = NULL;
+	return rc;
+}
+
 static int cam_hw_cdm_enable_bl_done_irq(struct cam_hw_info *cdm_hw,
 	bool enable)
 {
@@ -186,6 +213,9 @@
 {
 	uint32_t dump_reg, core_dbg, loop_cnt;
 
+	if (!debugfs_entry.dump_register)
+		return;
+
 	mutex_lock(&cdm_hw->hw_mutex);
 	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
 	CAM_ERR(CAM_CDM, "CDM HW core status=%x", dump_reg);
@@ -1020,6 +1050,7 @@
 	}
 	cdm_hw->open_count--;
 	mutex_unlock(&cdm_hw->hw_mutex);
+	cam_hw_cdm_create_debugfs_entry();
 
 	CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
 
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.c
index 969f3f4..15574ba 100644
--- a/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/cam_icp_context.c
@@ -45,6 +45,14 @@
 		return -EINVAL;
 	}
 
+	mutex_lock(&ctx->ctx_mutex);
+
+	if (ctx->state < CAM_CTX_ACQUIRED || ctx->state > CAM_CTX_ACTIVATED) {
+		CAM_ERR(CAM_ICP, "Invalid state icp ctx %d state %d",
+			ctx->ctx_id, ctx->state);
+		goto end;
+	}
+
 	CAM_INFO(CAM_ICP, "iommu fault for icp ctx %d state %d",
 		ctx->ctx_id, ctx->state);
 
@@ -63,6 +71,8 @@
 				req->request_id, rc);
 	}
 
+end:
+	mutex_unlock(&ctx->ctx_mutex);
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 2bac029..ca10a3a 100644
--- a/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera_v3/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -64,6 +64,37 @@
 
 static void cam_icp_mgr_process_dbg_buf(unsigned int debug_lvl);
 
+static int cam_icp_dump_io_cfg(struct cam_icp_hw_ctx_data *ctx_data,
+	int32_t buf_handle)
+{
+	uintptr_t vaddr_ptr;
+	uint32_t  *ptr;
+	size_t    len;
+	int       rc, i;
+	char      buf[512];
+	int       used = 0;
+
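+	/* Map the io_cfg buffer and dump its contents as 32-bit
+	 * words, several words per log line.
+	 */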
+	rc = cam_mem_get_cpu_buf(buf_handle, &vaddr_ptr, &len);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to get io_cfg buf address for %d",
+			ctx_data->ctx_id);
+		return rc;
+	}
+
+	len = len / sizeof(uint32_t);
+	ptr = (uint32_t *)vaddr_ptr;
+	for (i = 0; i < len; i++) {
+		used += snprintf(buf + used,
+			sizeof(buf) - used, "0X%08X-", ptr[i]);
+		if (!(i % 8)) {
+			CAM_INFO(CAM_ICP, "%s: %s", __func__, buf);
+			used = 0;
+		}
+	}
+
+	return rc;
+}
+
 static int cam_icp_send_ubwc_cfg(struct cam_icp_hw_mgr *hw_mgr)
 {
 	struct cam_hw_intf *a5_dev_intf = NULL;
@@ -4059,8 +4090,12 @@
 
 	packet = prepare_args->packet;
 
-	if (cam_packet_util_validate_packet(packet, prepare_args->remain_len))
+	if (cam_packet_util_validate_packet(packet, prepare_args->remain_len)) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_ICP, "ctx id: %u packet req id %lld validate fail",
+			ctx_data->ctx_id, packet->header.request_id);
 		return -EINVAL;
+	}
 
 	rc = cam_icp_mgr_pkt_validation(packet);
 	if (rc) {
@@ -4681,6 +4716,8 @@
 	rc = cam_icp_mgr_send_config_io(ctx_data, io_buf_addr);
 	if (rc) {
 		CAM_ERR(CAM_ICP, "IO Config command failed %d", rc);
+		cam_icp_dump_io_cfg(ctx_data,
+			icp_dev_acquire_info->io_config_cmd_handle);
 		goto ioconfig_failed;
 	}
 
diff --git a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 2d6e23c..8fba017 100644
--- a/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera_v3/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -46,7 +46,7 @@
 #define CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX 12
 
 /* Max CSI Rx irq error count threshold value */
-#define CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT               100
+#define CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT               5
 
 static int cam_ife_csid_is_ipp_ppp_format_supported(
 	uint32_t in_format)
@@ -1478,15 +1478,13 @@
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
-	CAM_INFO(CAM_ISP, "CSID: %d cnt: %d Halt csi2 rx",
-		csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
 
 	/* Disable the CSI2 rx interrupts */
-	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+	cam_io_w(0, soc_info->reg_map[0].mem_base +
 		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
 
 	/* Reset the Rx CFG registers */
-	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+	cam_io_w(0, soc_info->reg_map[0].mem_base +
 		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
 	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
 		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
@@ -3091,13 +3089,11 @@
 	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
 		csid_reg->cmn_reg->csid_irq_cmd_addr);
 
-	CAM_DBG(CAM_ISP, "irq_status_top = 0x%x", irq_status_top);
-	CAM_DBG(CAM_ISP, "irq_status_rx = 0x%x", irq_status_rx);
-	CAM_DBG(CAM_ISP, "irq_status_ipp = 0x%x", irq_status_ipp);
-	CAM_DBG(CAM_ISP, "irq_status_ppp = 0x%x", irq_status_ppp);
-	CAM_DBG(CAM_ISP, "irq_status_rdi0= 0x%x", irq_status_rdi[0]);
-	CAM_DBG(CAM_ISP, "irq_status_rdi1= 0x%x", irq_status_rdi[1]);
-	CAM_DBG(CAM_ISP, "irq_status_rdi2= 0x%x", irq_status_rdi[2]);
+	CAM_DBG(CAM_ISP,
+		"CSID %d irq status 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+		csid_hw->hw_intf->hw_idx, irq_status_top,
+		irq_status_rx, irq_status_ipp, irq_status_ppp,
+		irq_status_rdi[0], irq_status_rdi[1], irq_status_rdi[2]);
 
 	if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
 		CAM_DBG(CAM_ISP, "csi rx reset complete");
@@ -3107,71 +3103,38 @@
 	spin_lock_irqsave(&csid_hw->lock_state, flags);
 	if (csid_hw->device_enabled == 1) {
 		if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 over flow",
-				 csid_hw->hw_intf->hw_idx);
 			fatal_err_detected = true;
+			goto handle_fatal_error;
 		}
 		if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 over flow",
-				 csid_hw->hw_intf->hw_idx);
 			fatal_err_detected = true;
+			goto handle_fatal_error;
 		}
 		if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 over flow",
-				 csid_hw->hw_intf->hw_idx);
 			fatal_err_detected = true;
+			goto handle_fatal_error;
 		}
 		if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 over flow",
-				 csid_hw->hw_intf->hw_idx);
 			fatal_err_detected = true;
+			goto handle_fatal_error;
 		}
 		if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER FLOW",
-				 csid_hw->hw_intf->hw_idx);
 			fatal_err_detected = true;
+			goto handle_fatal_error;
 		}
 		if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP,
-				"CSID:%d CPHY_EOT_RECEPTION",
-				 csid_hw->hw_intf->hw_idx);
 			csid_hw->error_irq_count++;
 		}
 		if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP,
-				"CSID:%d CPHY_SOT_RECEPTION",
-				 csid_hw->hw_intf->hw_idx);
 			csid_hw->error_irq_count++;
 		}
-		if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PH_CRC",
-				 csid_hw->hw_intf->hw_idx);
-		}
-		if (irq_status_rx & CSID_CSI2_RX_ERROR_CRC) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_CRC",
-				 csid_hw->hw_intf->hw_idx);
-		}
-		if (irq_status_rx & CSID_CSI2_RX_ERROR_ECC) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_ECC",
-				 csid_hw->hw_intf->hw_idx);
-		}
-		if (irq_status_rx & CSID_CSI2_RX_ERROR_MMAPPED_VC_DT) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d MMAPPED_VC_DT",
-				 csid_hw->hw_intf->hw_idx);
-		}
 		if (irq_status_rx & CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP,
-				"CSID:%d ERROR_STREAM_UNDERFLOW",
-				 csid_hw->hw_intf->hw_idx);
 			csid_hw->error_irq_count++;
 		}
 		if (irq_status_rx & CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME) {
-			CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d UNBOUNDED_FRAME",
-				 csid_hw->hw_intf->hw_idx);
 			csid_hw->error_irq_count++;
 		}
 	}
-	spin_unlock_irqrestore(&csid_hw->lock_state, flags);
 
 	if (csid_hw->error_irq_count >
 		CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT) {
@@ -3179,8 +3142,15 @@
 		csid_hw->error_irq_count = 0;
 	}
 
-	if (fatal_err_detected)
+handle_fatal_error:
+	spin_unlock_irqrestore(&csid_hw->lock_state, flags);
+	if (fatal_err_detected) {
+		CAM_INFO(CAM_ISP,
+			"CSID: %d cnt: %d Halt csi2 rx irq_status_rx:0x%x",
+			csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt,
+			irq_status_rx);
 		cam_ife_csid_halt_csi2(csid_hw);
+	}
 
 	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ) {
 		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) {
@@ -3281,7 +3251,6 @@
 		/* IPP reset done bit */
 		if (irq_status_ipp &
 			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
-			CAM_DBG(CAM_ISP, "CSID IPP reset complete");
 			complete(&csid_hw->csid_ipp_complete);
 		}
 
@@ -3298,19 +3267,17 @@
 			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received",
 				csid_hw->hw_intf->hw_idx);
 
-		if ((irq_status_ipp & CSID_PATH_ERROR_CCIF_VIOLATION))
-			CAM_INFO_RATE_LIMIT(CAM_ISP,
-				"CSID:%d IPP CCIF violation",
-				csid_hw->hw_intf->hw_idx);
-
-		if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+		if ((irq_status_ipp & CSID_PATH_ERROR_CCIF_VIOLATION) ||
+			(irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW)) {
 			CAM_ERR_RATE_LIMIT(CAM_ISP,
-				"CSID:%d IPP fifo over flow",
-				csid_hw->hw_intf->hw_idx);
-			/* Stop IPP path immediately */
-			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
-				soc_info->reg_map[0].mem_base +
-				csid_reg->ipp_reg->csid_pxl_ctrl_addr);
+				"CSID:%d irq_status_ipp:0x%x",
+				csid_hw->hw_intf->hw_idx, irq_status_ipp);
+			if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+				/* Stop IPP path immediately */
+				cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+					soc_info->reg_map[0].mem_base +
+					csid_reg->ipp_reg->csid_pxl_ctrl_addr);
+			}
 		}
 	}
 
@@ -3319,7 +3286,6 @@
 		/* PPP reset done bit */
 		if (irq_status_ppp &
 			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
-			CAM_DBG(CAM_ISP, "CSID PPP reset complete");
 			complete(&csid_hw->csid_ppp_complete);
 		}
 
@@ -3336,26 +3302,23 @@
 			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP EOF received",
 				csid_hw->hw_intf->hw_idx);
 
-		if ((irq_status_ipp & CSID_PATH_ERROR_CCIF_VIOLATION))
-			CAM_INFO_RATE_LIMIT(CAM_ISP,
-				"CSID:%d PPP CCIF violation",
-				csid_hw->hw_intf->hw_idx);
-
-		if (irq_status_ppp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+		if ((irq_status_ppp & CSID_PATH_ERROR_CCIF_VIOLATION) ||
+			(irq_status_ppp & CSID_PATH_ERROR_FIFO_OVERFLOW)) {
 			CAM_ERR_RATE_LIMIT(CAM_ISP,
-				"CSID:%d PPP fifo over flow",
-				csid_hw->hw_intf->hw_idx);
-			/* Stop PPP path immediately */
-			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
-				soc_info->reg_map[0].mem_base +
-				csid_reg->ppp_reg->csid_pxl_ctrl_addr);
+				"CSID:%d irq_status_ppp:0x%x",
+				csid_hw->hw_intf->hw_idx, irq_status_ppp);
+			if (irq_status_ppp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+				/* Stop PPP path immediately */
+				cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+					soc_info->reg_map[0].mem_base +
+					csid_reg->ppp_reg->csid_pxl_ctrl_addr);
+			}
 		}
 	}
 
 	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
 		if (irq_status_rdi[i] &
 			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
-			CAM_DBG(CAM_ISP, "CSID RDI%d reset complete", i);
 			complete(&csid_hw->csid_rdin_complete[i]);
 		}
 
@@ -3372,14 +3335,14 @@
 			CAM_INFO_RATE_LIMIT(CAM_ISP,
 				"CSID RDI:%d EOF received", i);
 
-		if ((irq_status_rdi[i] & CSID_PATH_ERROR_CCIF_VIOLATION))
-			CAM_INFO_RATE_LIMIT(CAM_ISP,
-			"CSIDi RDI :%d CCIF violation", i);
-
-		if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+		if ((irq_status_rdi[i] & CSID_PATH_ERROR_CCIF_VIOLATION) ||
+			(irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW)) {
 			CAM_ERR_RATE_LIMIT(CAM_ISP,
-				"CSID:%d RDI fifo over flow",
-				csid_hw->hw_intf->hw_idx);
+				"CSID:%d irq_status_rdi[%d]:0x%x",
+				csid_hw->hw_intf->hw_idx, i,
+				irq_status_rdi[i]);
+		}
+		if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
 			/* Stop RDI path immediately */
 			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
 				soc_info->reg_map[0].mem_base +
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 85b8e37..318ea55 100644
--- a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -232,13 +232,18 @@
 	csiphy_dev->csiphy_info.csiphy_3phase =
 		cam_cmd_csiphy_info->csiphy_3phase;
 	csiphy_dev->csiphy_info.combo_mode |= cam_cmd_csiphy_info->combo_mode;
-	if (cam_cmd_csiphy_info->combo_mode == 1)
+	if (cam_cmd_csiphy_info->combo_mode == 1) {
 		csiphy_dev->csiphy_info.settle_time_combo_sensor =
 			cam_cmd_csiphy_info->settle_time;
-	else
+		csiphy_dev->csiphy_info.data_rate_combo_sensor =
+			cam_cmd_csiphy_info->data_rate;
+	} else {
 		csiphy_dev->csiphy_info.settle_time =
 			cam_cmd_csiphy_info->settle_time;
-	csiphy_dev->csiphy_info.data_rate = cam_cmd_csiphy_info->data_rate;
+		csiphy_dev->csiphy_info.data_rate =
+			cam_cmd_csiphy_info->data_rate;
+	}
 
 	if (cam_cmd_csiphy_info->secure_mode == 1)
 		cam_csiphy_update_secure_info(csiphy_dev,
@@ -269,6 +274,65 @@
 			csiphy_dev->ctrl_reg->csiphy_irq_reg[i].reg_addr);
 }
 
+void cam_csiphy_cphy_data_rate_config(struct csiphy_device *csiphy_device)
+{
+	int i = 0, j = 0;
+	uint64_t phy_data_rate = 0;
+	void __iomem *csiphybase = NULL;
+	ssize_t num_table_entries = 0;
+	struct data_rate_settings_t *settings_table = NULL;
+
+	if ((csiphy_device == NULL) ||
+		(csiphy_device->ctrl_reg == NULL) ||
+		(csiphy_device->ctrl_reg->data_rates_settings_table == NULL)) {
+		CAM_DBG(CAM_CSIPHY,
+			"Data rate specific register table not found");
+		return;
+	}
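+	/* The settings table is ordered by ascending bandwidth; the
+	 * first entry able to carry the required data rate is
+	 * programmed below.
+	 */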
+
+	phy_data_rate = csiphy_device->csiphy_info.data_rate;
+	csiphybase =
+		csiphy_device->soc_info.reg_map[0].mem_base;
+	settings_table =
+		csiphy_device->ctrl_reg->data_rates_settings_table;
+	num_table_entries =
+		settings_table->num_data_rate_settings;
+
+	CAM_DBG(CAM_CSIPHY, "required data rate : %llu", phy_data_rate);
+	for (i = 0; i < num_table_entries; i++) {
+		struct data_rate_reg_info_t *drate_settings =
+			settings_table->data_rate_settings;
+		uint64_t bandwidth =
+			drate_settings[i].bandwidth;
+		ssize_t  num_reg_entries =
+		drate_settings[i].data_rate_reg_array_size;
+
+		if (phy_data_rate > bandwidth) {
+			CAM_DBG(CAM_CSIPHY,
+					"Skipping table [%d] %llu required: %llu",
+					i, bandwidth, phy_data_rate);
+			continue;
+		}
+
+		CAM_DBG(CAM_CSIPHY,
+			"table[%d] BW : %llu Selected", i, bandwidth);
+		for (j = 0; j < num_reg_entries; j++) {
+			uint32_t reg_addr =
+			drate_settings[i].csiphy_data_rate_regs[j].reg_addr;
+
+			uint32_t reg_data =
+			drate_settings[i].csiphy_data_rate_regs[j].reg_data;
+
+			CAM_DBG(CAM_CSIPHY,
+				"writing reg : %x val : %x",
+						reg_addr, reg_data);
+			cam_io_w_mb(reg_data,
+				csiphybase + reg_addr);
+		}
+		break;
+	}
+}
+
 void cam_csiphy_cphy_irq_disable(struct csiphy_device *csiphy_dev)
 {
 	int32_t i;
@@ -476,6 +540,9 @@
 		lane_pos++;
 	}
 
+	if (csiphy_dev->csiphy_info.csiphy_3phase)
+		cam_csiphy_cphy_data_rate_config(csiphy_dev);
+
 	cam_csiphy_cphy_irq_config(csiphy_dev);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index 51cf9e9..cf81924 100644
--- a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -44,8 +44,10 @@
 #define MAX_CSIPHY_REG_ARRAY        70
 #define MAX_CSIPHY_CMN_REG_ARRAY    5
 
-#define MAX_LANES             5
-#define MAX_SETTINGS_PER_LANE 43
+#define MAX_LANES                   5
+#define MAX_SETTINGS_PER_LANE       43
+#define MAX_DATA_RATES              3
+#define MAX_DATA_RATE_REGS          30
 
 #define MAX_REGULATOR         5
 #define CAMX_CSIPHY_DEV_NAME "cam-csiphy-driver"
@@ -155,6 +157,32 @@
 	uint32_t csiphy_param_type;
 };
 
+struct csiphy_device;
+
+/*
+ * struct data_rate_reg_info_t
+ * @bandwidth: max bandwidth supported by these reg settings
+ * @data_rate_reg_array_size: number of reg value pairs in the array
+ * @csiphy_data_rate_regs: array of data rate specific reg value pairs
+ */
+struct data_rate_reg_info_t {
+	uint64_t bandwidth;
+	ssize_t  data_rate_reg_array_size;
+	struct csiphy_reg_t csiphy_data_rate_regs[MAX_DATA_RATE_REGS];
+};
+
+/**
+ * struct data_rate_settings_t
+ * @num_data_rate_settings: number of valid settings
+ *                          present in the data rate settings array
+ * @data_rate_settings: array of reg settings specific to a
+ *                      given data rate
+ */
+struct data_rate_settings_t {
+	ssize_t num_data_rate_settings;
+	struct data_rate_reg_info_t data_rate_settings[MAX_DATA_RATES];
+};
+
 /**
  * struct csiphy_ctrl_t
  * @csiphy_reg: Register address
@@ -166,6 +194,12 @@
  * @csiphy_3ph_reg: 3phase register set
  * @csiphy_2ph_3ph_mode_reg:
  *     2 phase 3phase combo register set
+ * @getclockvoting: function pointer used to find the
+ *      clock vote for the sensor output data rate
+ * @data_rate_settings_table:
+ *      Table which maintains the register
+ *      settings specific to the data rate
  */
 struct csiphy_ctrl_t {
 	struct csiphy_reg_parms_t csiphy_reg;
@@ -176,6 +210,8 @@
 	struct csiphy_reg_t (*csiphy_2ph_combo_mode_reg)[MAX_SETTINGS_PER_LANE];
 	struct csiphy_reg_t (*csiphy_3ph_reg)[MAX_SETTINGS_PER_LANE];
 	struct csiphy_reg_t (*csiphy_2ph_3ph_mode_reg)[MAX_SETTINGS_PER_LANE];
+	enum   cam_vote_level (*getclockvoting)(struct csiphy_device *phy_dev);
+	struct data_rate_settings_t *data_rates_settings_table;
 };
 
 /**
@@ -190,6 +226,8 @@
  * @settle_time   :  Settling time in ms
  * @settle_time_combo_sensor   :  Settling time in ms
  * @data_rate     :  Data rate in mbps
+ * @data_rate_combo_sensor: data rate of the combo sensor
+ *                          on the same phy
  *
  */
 struct cam_csiphy_param {
@@ -202,6 +240,7 @@
 	uint64_t    settle_time;
 	uint64_t    settle_time_combo_sensor;
 	uint64_t    data_rate;
+	uint64_t    data_rate_combo_sensor;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
index 0902601..0bf5aac 100644
--- a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,9 @@
 #include "include/cam_csiphy_1_2_hwreg.h"
 #include "include/cam_csiphy_2_0_hwreg.h"
 
+#define CSIPHY_3PH_DIVISOR           16
+#define CSIPHY_3PH_DIVISOR_12        32
+#define CSIPHY_2PH_DIVISOR           8
 #define BYTES_PER_REGISTER           4
 #define NUM_REGISTER_PER_LINE        4
 #define REG_OFFSET(__start, __i)    ((__start) + ((__i) * BYTES_PER_REGISTER))
@@ -79,10 +82,62 @@
 	return rc;
 }
 
+enum cam_vote_level get_clk_vote_default(struct csiphy_device *csiphy_dev)
+{
+	CAM_DBG(CAM_CSIPHY, "voting for SVS");
+	return CAM_SVS_VOTE;
+}
+
+enum cam_vote_level get_clk_voting_dynamic(struct csiphy_device *csiphy_dev)
+{
+	uint32_t cam_vote_level = 0;
+	uint32_t last_valid_vote = 0;
+	struct cam_hw_soc_info *soc_info;
+	uint64_t phy_data_rate = csiphy_dev->csiphy_info.data_rate;
+
+	soc_info = &csiphy_dev->soc_info;
+
+	if (csiphy_dev->is_acquired_dev_combo_mode)
+		phy_data_rate = max(phy_data_rate,
+			csiphy_dev->csiphy_info.data_rate_combo_sensor);
+
+	if (csiphy_dev->csiphy_info.csiphy_3phase) {
+		if (csiphy_dev->is_csiphy_3phase_hw == CSI_3PHASE_HW_12)
+			do_div(phy_data_rate, CSIPHY_3PH_DIVISOR_12);
+		else
+			do_div(phy_data_rate, CSIPHY_3PH_DIVISOR);
+	} else {
+		do_div(phy_data_rate, CSIPHY_2PH_DIVISOR);
+	}
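+	/* The divisors convert the aggregate serial data rate into the
+	 * corresponding PHY clock rate; e.g. a 2-phase lane moves one
+	 * byte per clock, hence CSIPHY_2PH_DIVISOR is 8 (the 3-phase
+	 * divisors are hardware specific).
+	 */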
+
+	/* round up to the next integer */
+	phy_data_rate += 1;
+
+	for (cam_vote_level = 0;
+			cam_vote_level < CAM_MAX_VOTE; cam_vote_level++) {
+		if (soc_info->clk_level_valid[cam_vote_level] != true)
+			continue;
+
+		if (soc_info->clk_rate[cam_vote_level][0] >
+				phy_data_rate) {
+			CAM_DBG(CAM_CSIPHY,
+				"match detected %s : %llu:%d level : %d",
+				soc_info->clk_name[0],
+				phy_data_rate,
+				soc_info->clk_rate[cam_vote_level][0],
+				cam_vote_level);
+			return cam_vote_level;
+		}
+		last_valid_vote = cam_vote_level;
+	}
+	return last_valid_vote;
+}
+
 int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev)
 {
 	int32_t rc = 0;
 	struct cam_hw_soc_info   *soc_info;
+	enum cam_vote_level vote_level = CAM_SVS_VOTE;
 
 	soc_info = &csiphy_dev->soc_info;
 
@@ -92,8 +147,9 @@
 		return rc;
 	}
 
+	vote_level = csiphy_dev->ctrl_reg->getclockvoting(csiphy_dev);
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
-		CAM_SVS_VOTE, ENABLE_IRQ);
+		vote_level, ENABLE_IRQ);
 	if (rc < 0) {
 		CAM_ERR(CAM_CSIPHY, "failed to enable platform resources %d",
 			rc);
@@ -174,9 +230,11 @@
 		csiphy_dev->ctrl_reg->csiphy_common_reg = csiphy_common_reg_1_0;
 		csiphy_dev->ctrl_reg->csiphy_reset_reg = csiphy_reset_reg_1_0;
 		csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_0;
+		csiphy_dev->ctrl_reg->getclockvoting = get_clk_vote_default;
 		csiphy_dev->hw_version = CSIPHY_VERSION_V10;
 		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
 		csiphy_dev->clk_lane = 0;
+		csiphy_dev->ctrl_reg->data_rates_settings_table = NULL;
 	} else if (of_device_is_compatible(soc_info->dev->of_node,
 		"qcom,csiphy-v1.1")) {
 		csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v1_1_reg;
@@ -191,9 +249,11 @@
 		csiphy_dev->ctrl_reg->csiphy_reset_reg =
 			csiphy_reset_reg_1_1;
 		csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_1;
+		csiphy_dev->ctrl_reg->getclockvoting = get_clk_vote_default;
 		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
 		csiphy_dev->hw_version = CSIPHY_VERSION_V11;
 		csiphy_dev->clk_lane = 0;
+		csiphy_dev->ctrl_reg->data_rates_settings_table = NULL;
 	} else if (of_device_is_compatible(soc_info->dev->of_node,
 		"qcom,csiphy-v1.2")) {
 		csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v1_2_reg;
@@ -206,10 +266,13 @@
 			csiphy_common_reg_1_2;
 		csiphy_dev->ctrl_reg->csiphy_reset_reg =
 			csiphy_reset_reg_1_2;
+		csiphy_dev->ctrl_reg->getclockvoting = get_clk_voting_dynamic;
 		csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_2;
-		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
+		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW_12;
 		csiphy_dev->hw_version = CSIPHY_VERSION_V12;
 		csiphy_dev->clk_lane = 0;
+		csiphy_dev->ctrl_reg->data_rates_settings_table =
+			&data_rate_delta_table;
 	} else if (of_device_is_compatible(soc_info->dev->of_node,
 		"qcom,csiphy-v2.0")) {
 		csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v2_0_reg;
@@ -221,9 +284,11 @@
 		csiphy_dev->ctrl_reg->csiphy_common_reg = csiphy_common_reg_2_0;
 		csiphy_dev->ctrl_reg->csiphy_reset_reg = csiphy_reset_reg_2_0;
 		csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v2_0;
+		csiphy_dev->ctrl_reg->getclockvoting = get_clk_vote_default;
 		csiphy_dev->hw_version = CSIPHY_VERSION_V20;
 		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
 		csiphy_dev->clk_lane = 0;
+		csiphy_dev->ctrl_reg->data_rates_settings_table = NULL;
 	} else {
 		CAM_ERR(CAM_CSIPHY, "invalid hw version : 0x%x",
 			csiphy_dev->hw_version);
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
index 68ca68c..64d0561 100644
--- a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,7 @@
 #define CDBG(fmt, args...) pr_debug(fmt, ##args)
 
 #define CSI_3PHASE_HW                               1
+#define CSI_3PHASE_HW_12                          0x12
 #define CSIPHY_VERSION_V35                        0x35
 #define CSIPHY_VERSION_V10                        0x10
 #define CSIPHY_VERSION_V11                        0x11
diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h
index 945910e..67653e8 100644
--- a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h
+++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h
@@ -19,10 +19,10 @@
 	.mipi_csiphy_interrupt_status0_addr = 0x8B0,
 	.mipi_csiphy_interrupt_clear0_addr = 0x858,
 	.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
-	.csiphy_common_array_size = 4,
+	.csiphy_common_array_size = 6,
 	.csiphy_reset_array_size = 5,
 	.csiphy_2ph_config_array_size = 21,
-	.csiphy_3ph_config_array_size = 31,
+	.csiphy_3ph_config_array_size = 38,
 	.csiphy_2ph_clock_lane = 0x1,
 	.csiphy_2ph_combo_ck_ln = 0x10,
 };
@@ -30,8 +30,10 @@
 struct csiphy_reg_t csiphy_common_reg_1_2[] = {
 	{0x0814, 0xd5, 0x00, CSIPHY_LANE_ENABLE},
 	{0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
-	{0x081C, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
-	{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x02, 0x00, CSIPHY_2PH_REGS},
+	{0x081C, 0x52, 0x00, CSIPHY_3PH_REGS},
+	{0x0800, 0x02, 0x00, CSIPHY_2PH_REGS},
+	{0x0800, 0x0E, 0x00, CSIPHY_3PH_REGS},
 };
 
 struct csiphy_reg_t csiphy_reset_reg_1_2[] = {
@@ -297,7 +299,7 @@
 struct
 csiphy_reg_t csiphy_3ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
 	{
-		{0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x015C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0990, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0994, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0998, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
@@ -305,10 +307,10 @@
 		{0x0994, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0998, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x098C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0168, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x016C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x010C, 0x07, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
 		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
@@ -321,27 +323,34 @@
 		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0144, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0164, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x01DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09C0, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09C4, 0x7D, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09C8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0984, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0988, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0980, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x09B0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09B4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
-		{0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x035C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0A90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0A98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0A90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0368, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x036C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A8C, 0xBF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x030C, 0x07, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
 		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
@@ -354,16 +363,23 @@
 		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0344, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0364, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x03DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0AB0, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AC0, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AC4, 0x7D, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AC8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A88, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A80, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
-		{0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x055C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0B90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0B94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0B98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
@@ -371,10 +387,10 @@
 		{0x0B94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0B98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0B8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0568, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x056C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x050C, 0x07, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
 		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
 		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
@@ -387,14 +403,107 @@
 		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0544, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0564, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x05DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0BB0, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BC0, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BC4, 0x7D, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BC8, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B88, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B80, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 };
 
+struct data_rate_settings_t data_rate_delta_table = {
+	.num_data_rate_settings = 3,
+	.data_rate_settings = {
+		{
+			/* (2.5 Gsps * 2.28 bits/symbol) rounded value, in bps */
+			.bandwidth = 5700000000,
+			.data_rate_reg_array_size = 12,
+			.csiphy_data_rate_regs = {
+				{0x15C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x35C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x55C, 0x66, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x9B4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0xAB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0xBB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x144, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x344, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x544, 0x22, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x16C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x36C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x56C, 0xAD, 0x00, CSIPHY_DEFAULT_PARAMS},
+			}
+		},
+		{
+			/* (3.5 Gsps * 2.28 bits/symbol) rounded value, in bps */
+			.bandwidth = 7980000000,
+			.data_rate_reg_array_size = 24,
+			.csiphy_data_rate_regs = {
+				{0x15C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x35C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x55C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x9B4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0xAB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0xBB4, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x9B0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0xAB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0xBB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x144, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x344, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x544, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x13C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x33C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x53C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x140, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x340, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x540, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x168, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x368, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x568, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x16C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x36C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x56C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+			},
+		},
+		{
+			/* (4.5 Gsps * 2.28 bits/symbol) rounded value, in bps */
+			.bandwidth = 10260000000,
+			.data_rate_reg_array_size = 24,
+			.csiphy_data_rate_regs = {
+				{0x15C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x35C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x55C, 0x46, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x9B4, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0xAB4, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0xBB4, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x9B0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0xAB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0xBB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x144, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x344, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x544, 0xA2, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x13C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x33C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x53C, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x140, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x340, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x540, 0x81, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x168, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x368, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x568, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x16C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x36C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
+				{0x56C, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
+			},
+		}
+	}
+};
 #endif /* _CAM_CSIPHY_1_2_HWREG_H_ */
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 9416ab4..ec88488 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1956,7 +1956,7 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME:
 		property_id = HAL_CONFIG_VENC_USELTRFRAME;
 		use_ltr.ref_ltr = ctrl->val;
-		use_ltr.use_constraint = false;
+		use_ltr.use_constraint = true;
 		use_ltr.frames = 0;
 		pdata = &use_ltr;
 		break;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 811414a..d8d7985 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -40,6 +40,7 @@
 #define FIRMWARE_SIZE			0X00A00000
 #define REG_ADDR_OFFSET_BITMASK	0x000FFFFF
 #define QDSS_IOVA_START 0x80001000
+#define MIN_PAYLOAD_SIZE 3
 
 static struct hal_device_data hal_ctxt;
 
@@ -2971,25 +2972,55 @@
 		log_level = VIDC_ERR;
 	}
 
+#define SKIP_INVALID_PKT(pkt_size, payload_size, pkt_hdr_size) ({ \
+		if (pkt_size < pkt_hdr_size || \
+			payload_size < MIN_PAYLOAD_SIZE || \
+			payload_size > \
+			(pkt_size - pkt_hdr_size + sizeof(u8))) { \
+			dprintk(VIDC_ERR, \
+				"%s: invalid msg size - %d\n", \
+				__func__, pkt->msg_size); \
+			continue; \
+		} \
+	})
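+	/*
+	 * SKIP_INVALID_PKT drops any packet whose declared payload
+	 * cannot fit within the packet itself; the sizeof(u8) slack
+	 * allows for the one payload byte already counted in the
+	 * packet struct size.
+	 */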
+
 	while (!__iface_dbgq_read(device, packet)) {
-		struct hfi_msg_sys_coverage_packet *pkt =
-			(struct hfi_msg_sys_coverage_packet *) packet;
+		struct hfi_packet_header *pkt =
+			(struct hfi_packet_header *) packet;
+
+		if (pkt->size < sizeof(struct hfi_packet_header)) {
+			dprintk(VIDC_ERR, "%s: invalid pkt size\n",
+				__func__);
+			continue;
+		}
 
 		if (pkt->packet_type == HFI_MSG_SYS_COV) {
+			struct hfi_msg_sys_coverage_packet *pkt =
+				(struct hfi_msg_sys_coverage_packet *) packet;
 			int stm_size = 0;
 
+			SKIP_INVALID_PKT(pkt->size,
+				pkt->msg_size, sizeof(*pkt));
+
 			stm_size = stm_log_inv_ts(0, 0,
 				pkt->rg_msg_data, pkt->msg_size);
 			if (stm_size == 0)
 				dprintk(VIDC_ERR,
 					"In %s, stm_log returned size of 0\n",
 					__func__);
-		} else {
+
+		} else if (pkt->packet_type == HFI_MSG_SYS_DEBUG) {
 			struct hfi_msg_sys_debug_packet *pkt =
 				(struct hfi_msg_sys_debug_packet *) packet;
+
+			SKIP_INVALID_PKT(pkt->size,
+				pkt->msg_size, sizeof(*pkt));
+
+			pkt->rg_msg_data[pkt->msg_size-1] = '\0';
 			dprintk(log_level, "%s", pkt->rg_msg_data);
 		}
 	}
+#undef SKIP_INVALID_PKT
 
 	if (local_packet)
 		kfree(packet);
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index b86f7d5..733cbb9 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -902,6 +902,11 @@
 	u32 session_id;
 };
 
+struct hfi_packet_header {
+	u32 size;
+	u32 packet_type;
+};
+
 struct hfi_cmd_sys_init_packet {
 	u32 size;
 	u32 packet_type;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_venc.c b/drivers/media/platform/msm/vidc_3x/msm_venc.c
index 5e98a5c..031c529 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_venc.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_venc.c
@@ -3444,7 +3444,7 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME:
 		property_id = HAL_CONFIG_VENC_USELTRFRAME;
 		use_ltr.ref_ltr = ctrl->val;
-		use_ltr.use_constraint = false;
+		use_ltr.use_constraint = true;
 		use_ltr.frames = 0;
 		pdata = &use_ltr;
 		break;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc.c b/drivers/media/platform/msm/vidc_3x/msm_vidc.c
index 77452f0..07804d2 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc.c
@@ -1084,7 +1084,7 @@
 		q->ops = msm_venc_get_vb2q_ops();
 	q->mem_ops = &msm_vidc_vb2_mem_ops;
 	q->drv_priv = inst;
-	q->allow_zero_bytesused = !V4L2_TYPE_IS_OUTPUT(type);
+	q->allow_zero_bytesused = 1;
 	return vb2_queue_init(q);
 }
 
diff --git a/drivers/media/platform/msm/vidc_3x/venus_hfi.c b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
index 1bd6ae8..2d0e353 100644
--- a/drivers/media/platform/msm/vidc_3x/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, 2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, 2018-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,7 @@
 #define FIRMWARE_SIZE			0X00A00000
 #define REG_ADDR_OFFSET_BITMASK	0x000FFFFF
 #define QDSS_IOVA_START 0x80001000
+#define MIN_PAYLOAD_SIZE 3
 
 static struct hal_device_data hal_ctxt;
 
@@ -3380,24 +3381,55 @@
 		local_packet = true;
 	}
 
+#define SKIP_INVALID_PKT(pkt_size, payload_size, pkt_hdr_size) ({ \
+		if (pkt_size < pkt_hdr_size || \
+			payload_size < MIN_PAYLOAD_SIZE || \
+			payload_size > \
+			(pkt_size - pkt_hdr_size + sizeof(u8))) { \
+			dprintk(VIDC_ERR, \
+				"%s: invalid msg size - %d\n", \
+				__func__, pkt->msg_size); \
+			continue; \
+		} \
+	})
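+	/* As in venus_hfi.c, skip any packet whose declared payload
+	 * cannot fit within the packet itself.
+	 */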
+
 	while (!__iface_dbgq_read(device, packet)) {
-		struct hfi_msg_sys_coverage_packet *pkt =
-			(struct hfi_msg_sys_coverage_packet *) packet;
+		struct hfi_packet_header *pkt =
+			(struct hfi_packet_header *) packet;
+
+		if (pkt->size < sizeof(struct hfi_packet_header)) {
+			dprintk(VIDC_ERR, "%s: invalid pkt size %u\n",
+				__func__, pkt->size);
+			continue;
+		}
+
 		if (pkt->packet_type == HFI_MSG_SYS_COV) {
+			struct hfi_msg_sys_coverage_packet *pkt =
+				(struct hfi_msg_sys_coverage_packet *) packet;
 			int stm_size = 0;
 
+			SKIP_INVALID_PKT(pkt->size,
+				pkt->msg_size, sizeof(*pkt));
+
 			stm_size = stm_log_inv_ts(0, 0,
 				pkt->rg_msg_data, pkt->msg_size);
 			if (stm_size == 0)
 				dprintk(VIDC_ERR,
 					"In %s, stm_log returned size of 0\n",
 					__func__);
-		} else {
+
+		} else if (pkt->packet_type == HFI_MSG_SYS_DEBUG) {
 			struct hfi_msg_sys_debug_packet *pkt =
 				(struct hfi_msg_sys_debug_packet *) packet;
+
+			SKIP_INVALID_PKT(pkt->size,
+				pkt->msg_size, sizeof(*pkt));
+
+			pkt->rg_msg_data[pkt->msg_size - 1] = '\0';
 			dprintk(VIDC_FW, "%s", pkt->rg_msg_data);
 		}
 	}
+#undef SKIP_INVALID_PKT
 
 	if (local_packet)
 		kfree(packet);
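
Note: SKIP_INVALID_PKT rejects any packet whose declared payload cannot
fit between the end of the header and the declared end of the packet.
The same invariant as a standalone sketch (struct layout simplified,
not the driver's exact definitions):

    #include <stdbool.h>
    #include <stdint.h>

    #define MIN_PAYLOAD_SIZE 3

    struct dbg_pkt {
    	uint32_t size;           /* total bytes in this packet  */
    	uint32_t msg_size;       /* bytes used in rg_msg_data[] */
    	uint8_t  rg_msg_data[1]; /* payload, declared as [1]    */
    };

    static bool pkt_is_valid(const struct dbg_pkt *pkt)
    {
    	/* sizeof(*pkt) already counts one payload byte, hence the
    	 * "+ 1" mirroring the "+ sizeof(u8)" in the macro above */
    	return pkt->size >= sizeof(*pkt) &&
    	       pkt->msg_size >= MIN_PAYLOAD_SIZE &&
    	       pkt->msg_size <= pkt->size - sizeof(*pkt) + 1;
    }
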
diff --git a/drivers/media/platform/msm/vidc_3x/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc_3x/vidc_hfi_helper.h
index 39904a5..c09cf84c 100644
--- a/drivers/media/platform/msm/vidc_3x/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc_3x/vidc_hfi_helper.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -974,6 +974,11 @@
 	u32 session_id;
 };
 
+struct hfi_packet_header {
+	u32 size;
+	u32 packet_type;
+};
+
 struct hfi_cmd_sys_init_packet {
 	u32 size;
 	u32 packet_type;
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index e68d271..8354ad2 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -288,7 +288,7 @@
 {
 	struct emmaprp_ctx *ctx = priv;
 	struct emmaprp_q_data *s_q_data, *d_q_data;
-	struct vb2_buffer *src_buf, *dst_buf;
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
 	struct emmaprp_dev *pcdev = ctx->dev;
 	unsigned int s_width, s_height;
 	unsigned int d_width, d_height;
@@ -308,8 +308,8 @@
 	d_height = d_q_data->height;
 	d_size = d_width * d_height;
 
-	p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0);
-	p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+	p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+	p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
 	if (!p_in || !p_out) {
 		v4l2_err(&pcdev->v4l2_dev,
 			 "Acquiring kernel pointers to buffers failed\n");
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 62c0dec..5f6ccf4 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -498,7 +498,7 @@
 {
 	struct g2d_ctx *ctx = prv;
 	struct g2d_dev *dev = ctx->dev;
-	struct vb2_buffer *src, *dst;
+	struct vb2_v4l2_buffer *src, *dst;
 	unsigned long flags;
 	u32 cmd = 0;
 
@@ -513,10 +513,10 @@
 	spin_lock_irqsave(&dev->ctrl_lock, flags);
 
 	g2d_set_src_size(dev, &ctx->in);
-	g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0));
+	g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
 
 	g2d_set_dst_size(dev, &ctx->out);
-	g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0));
+	g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));
 
 	g2d_set_rop4(dev, ctx->rop);
 	g2d_set_flip(dev, ctx->flip);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 1da2c94..c89922f 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -789,14 +789,14 @@
 static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
 {
 	struct s5p_jpeg *jpeg = ctx->jpeg;
-	struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 	struct s5p_jpeg_buffer jpeg_buffer;
 	unsigned int word;
 	int c, x, components;
 
 	jpeg_buffer.size = 2; /* Ls */
 	jpeg_buffer.data =
-		(unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2;
+		(unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2;
 	jpeg_buffer.curr = 0;
 
 	word = 0;
@@ -826,14 +826,14 @@
 static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
 {
 	struct s5p_jpeg *jpeg = ctx->jpeg;
-	struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 	struct s5p_jpeg_buffer jpeg_buffer;
 	unsigned int word;
 	int c, i, n, j;
 
 	for (j = 0; j < ctx->out_q.dht.n; ++j) {
 		jpeg_buffer.size = ctx->out_q.dht.len[j];
-		jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
+		jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
 				   ctx->out_q.dht.marker[j];
 		jpeg_buffer.curr = 0;
 
@@ -885,13 +885,13 @@
 static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
 {
 	struct s5p_jpeg *jpeg = ctx->jpeg;
-	struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 	struct s5p_jpeg_buffer jpeg_buffer;
 	int c, x, components;
 
 	jpeg_buffer.size = ctx->out_q.sof_len;
 	jpeg_buffer.data =
-		(unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof;
+		(unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof;
 	jpeg_buffer.curr = 0;
 
 	skip(&jpeg_buffer, 5); /* P, Y, X */
@@ -916,14 +916,14 @@
 static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx)
 {
 	struct s5p_jpeg *jpeg = ctx->jpeg;
-	struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 	struct s5p_jpeg_buffer jpeg_buffer;
 	unsigned int word;
 	int c, i, j;
 
 	for (j = 0; j < ctx->out_q.dqt.n; ++j) {
 		jpeg_buffer.size = ctx->out_q.dqt.len[j];
-		jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
+		jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
 				   ctx->out_q.dqt.marker[j];
 		jpeg_buffer.curr = 0;
 
@@ -1264,13 +1264,16 @@
 	return 0;
 }
 
-static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
+static int enum_fmt(struct s5p_jpeg_ctx *ctx,
+		    struct s5p_jpeg_fmt *sjpeg_formats, int n,
 		    struct v4l2_fmtdesc *f, u32 type)
 {
 	int i, num = 0;
+	unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag;
 
 	for (i = 0; i < n; ++i) {
-		if (sjpeg_formats[i].flags & type) {
+		if (sjpeg_formats[i].flags & type &&
+		    sjpeg_formats[i].flags & fmt_ver_flag) {
 			/* index-th format of type type found ? */
 			if (num == f->index)
 				break;
@@ -1297,11 +1300,11 @@
 	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
 
 	if (ctx->mode == S5P_JPEG_ENCODE)
-		return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+		return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
 				SJPEG_FMT_FLAG_ENC_CAPTURE);
 
-	return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
-					SJPEG_FMT_FLAG_DEC_CAPTURE);
+	return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+			SJPEG_FMT_FLAG_DEC_CAPTURE);
 }
 
 static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
@@ -1310,11 +1313,11 @@
 	struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
 
 	if (ctx->mode == S5P_JPEG_ENCODE)
-		return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+		return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
 				SJPEG_FMT_FLAG_ENC_OUTPUT);
 
-	return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
-					SJPEG_FMT_FLAG_DEC_OUTPUT);
+	return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+			SJPEG_FMT_FLAG_DEC_OUTPUT);
 }
 
 static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
@@ -2027,15 +2030,15 @@
 {
 	struct s5p_jpeg_ctx *ctx = priv;
 	struct s5p_jpeg *jpeg = ctx->jpeg;
-	struct vb2_buffer *src_buf, *dst_buf;
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
 	unsigned long src_addr, dst_addr, flags;
 
 	spin_lock_irqsave(&ctx->jpeg->slock, flags);
 
 	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
-	src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
-	dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+	src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+	dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
 
 	s5p_jpeg_reset(jpeg->regs);
 	s5p_jpeg_poweron(jpeg->regs);
@@ -2108,7 +2111,7 @@
 {
 	struct s5p_jpeg *jpeg = ctx->jpeg;
 	struct s5p_jpeg_fmt *fmt;
-	struct vb2_buffer *vb;
+	struct vb2_v4l2_buffer *vb;
 	struct s5p_jpeg_addr jpeg_addr = {};
 	u32 pix_size, padding_bytes = 0;
 
@@ -2127,7 +2130,7 @@
 		vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 	}
 
-	jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
+	jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
 
 	if (fmt->colplanes == 2) {
 		jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
@@ -2145,7 +2148,7 @@
 static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
 {
 	struct s5p_jpeg *jpeg = ctx->jpeg;
-	struct vb2_buffer *vb;
+	struct vb2_v4l2_buffer *vb;
 	unsigned int jpeg_addr = 0;
 
 	if (ctx->mode == S5P_JPEG_ENCODE)
@@ -2153,7 +2156,7 @@
 	else
 		vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 
-	jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+	jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
 	if (jpeg->variant->version == SJPEG_EXYNOS5433 &&
 	    ctx->mode == S5P_JPEG_DECODE)
 		jpeg_addr += ctx->out_q.sos;
@@ -2268,7 +2271,7 @@
 {
 	struct s5p_jpeg *jpeg = ctx->jpeg;
 	struct s5p_jpeg_fmt *fmt;
-	struct vb2_buffer *vb;
+	struct vb2_v4l2_buffer *vb;
 	struct s5p_jpeg_addr jpeg_addr = {};
 	u32 pix_size;
 
@@ -2282,7 +2285,7 @@
 		fmt = ctx->cap_q.fmt;
 	}
 
-	jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
+	jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
 
 	if (fmt->colplanes == 2) {
 		jpeg_addr.cb = jpeg_addr.y + pix_size;
@@ -2300,7 +2303,7 @@
 static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
 {
 	struct s5p_jpeg *jpeg = ctx->jpeg;
-	struct vb2_buffer *vb;
+	struct vb2_v4l2_buffer *vb;
 	unsigned int jpeg_addr = 0;
 
 	if (ctx->mode == S5P_JPEG_ENCODE)
@@ -2308,7 +2311,7 @@
 	else
 		vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 
-	jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+	jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
 	exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr);
 }
 
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 15a562a..a4f5932 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -276,13 +276,13 @@
 static void sh_veu_device_run(void *priv)
 {
 	struct sh_veu_dev *veu = priv;
-	struct vb2_buffer *src_buf, *dst_buf;
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
 
 	src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
 	dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
 
 	if (src_buf && dst_buf)
-		sh_veu_process(veu, src_buf, dst_buf);
+		sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf);
 }
 
 		/* ========== video ioctls ========== */
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index f9a810e..5f052189 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -841,6 +841,7 @@
 	if (edid->start_block + edid->blocks > dev->edid_blocks)
 		edid->blocks = dev->edid_blocks - edid->start_block;
 	memcpy(edid->edid, dev->edid, edid->blocks * 128);
-	cec_set_edid_phys_addr(edid->edid, edid->blocks * 128, adap->phys_addr);
+	if (adap)
+		cec_set_edid_phys_addr(edid->edid, edid->blocks * 128, adap->phys_addr);
 	return 0;
 }
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 9f2a64c..21102ea 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1203,7 +1203,7 @@
 
 	__uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
 
-	memset(ev->reserved, 0, sizeof(ev->reserved));
+	memset(ev, 0, sizeof(*ev));
 	ev->type = V4L2_EVENT_CTRL;
 	ev->id = v4l2_ctrl.id;
 	ev->u.ctrl.value = value;
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 48503f30..dcca723 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -638,6 +638,14 @@
 	if (!uvc_hw_timestamps_param)
 		return;
 
+	/*
+	 * We will get called from __vb2_queue_cancel() if there are buffers
+	 * done but not dequeued by the user, but the sample array has already
+	 * been released at that time. Just bail out in that case.
+	 */
+	if (!clock->samples)
+		return;
+
 	spin_lock_irqsave(&clock->lock, flags);
 
 	if (clock->count < clock->size)
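
Note: the NULL check is a teardown-race guard: this path can still run
after __vb2_queue_cancel() has freed the sample array, so the array
must be tested before it is touched. The pattern, sketched with
illustrative types:

    struct clock_state {
    	unsigned long long *samples; /* freed by the cancel path */
    	unsigned int count;
    };

    static void record_sample(struct clock_state *clock,
    			      unsigned long long ts)
    {
    	if (!clock->samples) /* already released; bail out */
    		return;
    	clock->samples[clock->count++] = ts;
    }
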
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index fa14101..d16128f 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1235,7 +1235,7 @@
 
 static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
 {
-	memset(ev->reserved, 0, sizeof(ev->reserved));
+	memset(ev, 0, sizeof(*ev));
 	ev->type = V4L2_EVENT_CTRL;
 	ev->id = ctrl->id;
 	ev->u.ctrl.changes = changes;
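
Note: zeroing the whole event instead of just ev->reserved matters
because the structure is copied to userspace; any field or padding byte
left uninitialized would leak kernel stack contents. A sketch with a
simplified stand-in layout (not the real struct v4l2_event):

    #include <string.h>

    struct event_sketch {
    	unsigned int type;
    	unsigned int id;
    	unsigned char payload[64];
    	unsigned int reserved[8];
    };

    static void fill_event_sketch(struct event_sketch *ev,
    			          unsigned int type, unsigned int id)
    {
    	/* clear every byte, fields and padding alike, since the
    	 * event is copied verbatim to userspace */
    	memset(ev, 0, sizeof(*ev));
    	ev->type = type;
    	ev->id = id;
    }
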
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index a29ddca..ae67919 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -146,7 +146,6 @@
 		return;
 
 	check_once = true;
-	WARN_ON(1);
 
 	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
 	if (vb->vb2_queue->allow_zero_bytesused)
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 1d49a8d..7c040a3 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -72,7 +72,7 @@
 	u32 value;
 
 	/* compute the number of MC clock cycles per tick */
-	tick = mc->tick * clk_get_rate(mc->clk);
+	tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
 	do_div(tick, NSEC_PER_SEC);
 
 	value = readl(mc->regs + MC_EMEM_ARB_CFG);
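
Note: the cast widens one operand before the multiply; with a tick
period in nanoseconds and clk_get_rate() returning hundreds of MHz, the
32-bit product overflows before do_div() ever runs. The same fix in a
standalone sketch:

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    static uint64_t cycles_per_tick(uint32_t tick_ns, uint32_t rate_hz)
    {
    	/* widen first, then multiply, then divide */
    	uint64_t cycles = (uint64_t)tick_ns * rate_hz;

    	return cycles / NSEC_PER_SEC;
    }
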
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index 9966891..296c711 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -43,7 +43,9 @@
 void lkdtm_EXEC_VMALLOC(void);
 void lkdtm_EXEC_RODATA(void);
 void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_EXEC_NULL(void);
 void lkdtm_ACCESS_USERSPACE(void);
+void lkdtm_ACCESS_NULL(void);
 
 /* lkdtm_rodata.c */
 void lkdtm_rodata_do_nothing(void);
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index b72fb64..289c2d3 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -217,7 +217,9 @@
 	CRASHTYPE(EXEC_VMALLOC),
 	CRASHTYPE(EXEC_RODATA),
 	CRASHTYPE(EXEC_USERSPACE),
+	CRASHTYPE(EXEC_NULL),
 	CRASHTYPE(ACCESS_USERSPACE),
+	CRASHTYPE(ACCESS_NULL),
 	CRASHTYPE(WRITE_RO),
 	CRASHTYPE(WRITE_RO_AFTER_INIT),
 	CRASHTYPE(WRITE_KERN),
diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c
index 45f1c0f..1a9dcda 100644
--- a/drivers/misc/lkdtm_perms.c
+++ b/drivers/misc/lkdtm_perms.c
@@ -160,6 +160,11 @@
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
+void lkdtm_EXEC_NULL(void)
+{
+	execute_location(NULL, CODE_AS_IS);
+}
+
 void lkdtm_ACCESS_USERSPACE(void)
 {
 	unsigned long user_addr, tmp = 0;
@@ -191,6 +196,19 @@
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
+void lkdtm_ACCESS_NULL(void)
+{
+	unsigned long tmp;
+	unsigned long *ptr = (unsigned long *)NULL;
+
+	pr_info("attempting bad read at %px\n", ptr);
+	tmp = *ptr;
+	tmp += 0xc0dec0de;
+
+	pr_info("attempting bad write at %px\n", ptr);
+	*ptr = tmp;
+}
+
 void __init lkdtm_perms_init(void)
 {
 	/* Make sure we can write to __ro_after_init values during __init */
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 00cf8e2..72e5031 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -331,6 +331,7 @@
 	char app_name[MAX_APP_NAME_SIZE];
 	u32  app_arch;
 	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
+	bool from_smcinvoke;
 };
 
 struct qseecom_listener_handle {
@@ -1268,7 +1269,7 @@
 	new_entry->listener_in_use = false;
 	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
 
-	pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
+	pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
 	return ret;
 }
 
@@ -1327,7 +1328,7 @@
 	kzfree(ptr_svc);
 
 	data->released = true;
-	pr_warn("Service %d is unregistered\n", data->listener.id);
+	pr_debug("Service %d is unregistered\n", data->listener.id);
 	return ret;
 }
 
@@ -1973,6 +1974,7 @@
 	sigset_t old_sigset;
 	unsigned long flags;
 	bool found_app = false;
+	struct qseecom_registered_app_list dummy_app_entry = { {NULL} };
 
 	if (!resp || !data) {
 		pr_err("invalid resp or data pointer\n");
@@ -1981,25 +1983,32 @@
 	}
 
 	/* find app_id & img_name from list */
-	if (!ptr_app && data->client.app_arch != ELFCLASSNONE) {
-		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
-		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
-							list) {
-			if ((ptr_app->app_id == data->client.app_id) &&
-				(!strcmp(ptr_app->app_name,
+	if (!ptr_app) {
+		if (data->client.from_smcinvoke) {
+			pr_debug("This request is from smcinvoke\n");
+			ptr_app = &dummy_app_entry;
+			ptr_app->app_id = data->client.app_id;
+		} else {
+			spin_lock_irqsave(&qseecom.registered_app_list_lock,
+						flags);
+			list_for_each_entry(ptr_app,
+				&qseecom.registered_app_list_head, list) {
+				if ((ptr_app->app_id == data->client.app_id) &&
+					(!strcmp(ptr_app->app_name,
 						data->client.app_name))) {
-				found_app = true;
-				break;
+					found_app = true;
+					break;
+				}
 			}
-		}
-		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
-					flags);
-		if (!found_app) {
-			pr_err("app_id %d (%s) is not found\n",
-				data->client.app_id,
-				(char *)data->client.app_name);
-			ret = -ENOENT;
-			goto exit;
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+			if (!found_app) {
+				pr_err("app_id %d (%s) is not found\n",
+					data->client.app_id,
+					(char *)data->client.app_name);
+				ret = -ENOENT;
+				goto exit;
+			}
 		}
 	}
 
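
Note: requests arriving via smcinvoke have no entry in the
registered-app list, so a stack-local dummy entry carrying only the
app_id stands in for a real one. The pattern, sketched (types and names
illustrative, not qseecom's):

    #include <stddef.h>

    struct app_entry {
    	unsigned int app_id;
    };

    static struct app_entry *lookup_or_dummy(unsigned int app_id,
    					     int from_smcinvoke,
    					     struct app_entry *dummy)
    {
    	if (from_smcinvoke) {
    		dummy->app_id = app_id; /* sentinel carries only the id */
    		return dummy;
    	}
    	return NULL; /* caller searches the registered list instead */
    }
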
@@ -3993,7 +4002,7 @@
 		if (wait_event_interruptible(this_lstnr->rcv_req_wq,
 				__qseecom_listener_has_rcvd_req(data,
 				this_lstnr))) {
-			pr_warn("Interrupted: exiting Listener Service = %d\n",
+			pr_debug("Interrupted: exiting Listener Service = %d\n",
 						(uint32_t)data->listener.id);
 			/* woken up for different reason */
 			return -ERESTARTSYS;
@@ -4943,6 +4952,7 @@
 	resp.data = desc->ret[2];	/*listener_id*/
 
 	dummy_private_data.client.app_id = desc->ret[1];
+	dummy_private_data.client.from_smcinvoke = true;
 	dummy_app_entry.app_id = desc->ret[1];
 
 	mutex_lock(&app_access_lock);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 8fa478c..619457b 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1120,7 +1120,7 @@
 {
 }
 #endif
-static void __init init_mmcsd_host(struct mmc_davinci_host *host)
+static void init_mmcsd_host(struct mmc_davinci_host *host)
 {
 
 	mmc_davinci_reset_ctrl(host, 1);
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index a4bf14e..21dfce2 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -920,7 +920,7 @@
 	reg &= ~(1 << 5);
 	OMAP_MMC_WRITE(host, SDIO, reg);
 	/* Set maximum timeout */
-	OMAP_MMC_WRITE(host, CTO, 0xff);
+	OMAP_MMC_WRITE(host, CTO, 0xfd);
 }
 
 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index c763b40..3e13969 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -181,7 +181,7 @@
 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
 {
 	struct dma_async_tx_descriptor *tx;
-	enum dma_data_direction direction;
+	enum dma_transfer_direction direction;
 	struct dma_slave_config	config;
 	struct dma_chan *chan;
 	unsigned int nob = data->blocks;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 80918ab..4398398 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -127,7 +127,7 @@
 static bool sdhci_acpi_byt(void)
 {
 	static const struct x86_cpu_id byt[] = {
-		{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+		{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
 		{}
 	};
 
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 5f1abe7..9ed60c8 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2,7 +2,7 @@
  * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
  * driver source file
  *
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -155,12 +155,12 @@
 #define CORE_FLL_CYCLE_CNT	(1 << 18)
 #define CORE_DLL_CLOCK_DISABLE	(1 << 21)
 
-#define DDR_CONFIG_POR_VAL		0x80040853
-#define DDR_CONFIG_PRG_RCLK_DLY_MASK	0x1FF
-#define DDR_CONFIG_PRG_RCLK_DLY		115
-#define DDR_CONFIG_2_POR_VAL		0x80040873
+#define DDR_CONFIG_POR_VAL		0x80040873
 #define DLL_USR_CTL_POR_VAL		0x10800
 #define ENABLE_DLL_LOCK_STATUS		(1 << 26)
+#define FINE_TUNE_MODE_EN		(1 << 27)
+#define BIAS_OK_SIGNAL			(1 << 29)
+#define DLL_CONFIG_3_POR_VAL		0x10
 
 /* 512 descriptors */
 #define SDHCI_MSM_MAX_SEGMENTS  (1 << 9)
@@ -203,8 +203,9 @@
 	u32 CORE_DDR_200_CFG;
 	u32 CORE_VENDOR_SPEC3;
 	u32 CORE_DLL_CONFIG_2;
+	u32 CORE_DLL_CONFIG_3;
 	u32 CORE_DDR_CONFIG;
-	u32 CORE_DDR_CONFIG_2;
+	u32 CORE_DDR_CONFIG_OLD; /* Applicable to sdcc minor ver < 0x49 only */
 	u32 CORE_DLL_USR_CTL; /* Present on SDCC5.1 onwards */
 };
 
@@ -233,8 +234,8 @@
 	.CORE_DDR_200_CFG = 0x224,
 	.CORE_VENDOR_SPEC3 = 0x250,
 	.CORE_DLL_CONFIG_2 = 0x254,
-	.CORE_DDR_CONFIG = 0x258,
-	.CORE_DDR_CONFIG_2 = 0x25C,
+	.CORE_DLL_CONFIG_3 = 0x258,
+	.CORE_DDR_CONFIG = 0x25C,
 	.CORE_DLL_USR_CTL = 0x388,
 };
 
@@ -263,8 +264,9 @@
 	.CORE_DDR_200_CFG = 0x184,
 	.CORE_VENDOR_SPEC3 = 0x1B0,
 	.CORE_DLL_CONFIG_2 = 0x1B4,
-	.CORE_DDR_CONFIG = 0x1B8,
-	.CORE_DDR_CONFIG_2 = 0x1BC,
+	.CORE_DLL_CONFIG_3 = 0x1B8,
+	.CORE_DDR_CONFIG_OLD = 0x1B8, /* Applicable to sdcc minor ver < 0x49 */
+	.CORE_DDR_CONFIG = 0x1BC,
 };
 
 u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
@@ -366,6 +368,14 @@
 	VDD_IO_SET_LEVEL,
 };
 
+enum dll_init_context {
+	DLL_INIT_NORMAL = 0,
+	DLL_INIT_FROM_CX_COLLAPSE_EXIT,
+};
+
+static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
+						u32 req_clk);
+
 /* MSM platform specific tuning */
 static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
 						u8 poll)
@@ -530,11 +540,13 @@
 	 * Write the selected DLL clock output phase (0 ... 15)
 	 * to CDR_SELEXT bit field of DLL_CONFIG register.
 	 */
-	writel_relaxed(((readl_relaxed(host->ioaddr +
+	if (msm_host->dll_hsr->dll_config & (0xF << 20)) {
+		writel_relaxed(((readl_relaxed(host->ioaddr +
 			msm_host_offset->CORE_DLL_CONFIG)
 			& ~(0xF << 20))
 			| (grey_coded_phase_table[phase] << 20)),
 			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+	}
 
 	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
 	writel_relaxed((readl_relaxed(host->ioaddr +
@@ -710,7 +722,8 @@
 }
 
 /* Initialize the DLL (Programmable Delay Line ) */
-static int msm_init_cm_dll(struct sdhci_host *host)
+static int msm_init_cm_dll(struct sdhci_host *host,
+				enum dll_init_context init_context)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
@@ -763,22 +776,44 @@
 	writel_relaxed((readl_relaxed(host->ioaddr +
 		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
 		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
-	msm_cm_dll_set_freq(host);
 
 	if (msm_host->use_updated_dll_reset) {
 		u32 mclk_freq = 0;
+		u32 actual_clk = sdhci_msm_get_sup_clk_rate(host, host->clock);
 
-		if ((readl_relaxed(host->ioaddr +
-					msm_host_offset->CORE_DLL_CONFIG_2)
-					& CORE_FLL_CYCLE_CNT))
-			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
-		else
-			mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
+		/*
+		 * Only configure the mclk_freq in normal DLL init
+		 * context. If the DLL init is coming from
+		 * CX Collapse Exit context, the host->clock may be zero.
+		 * The DLL_CONFIG_2 register has already been restored to
+		 * its proper value prior to getting here.
+		 */
+		if (init_context == DLL_INIT_NORMAL) {
+			switch (actual_clk) {
+			case 202000000:
+			case 201500000:
+			case 200000000:
+				mclk_freq = 42;
+				break;
+			case 192000000:
+				mclk_freq = 40;
+				break;
+			default:
+				mclk_freq = (u32)((actual_clk / TCXO_FREQ) * 4);
+				pr_info_once("%s: %s: non-standard clk freq = %u\n",
+					mmc_hostname(mmc), __func__, actual_clk);
+			}
 
-		writel_relaxed(((readl_relaxed(host->ioaddr +
-			msm_host_offset->CORE_DLL_CONFIG_2)
-			& ~(0xFF << 10)) | (mclk_freq << 10)),
-			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+			if ((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_2)
+				& CORE_FLL_CYCLE_CNT))
+				mclk_freq *= 2;
+
+			writel_relaxed(((readl_relaxed(host->ioaddr +
+			   msm_host_offset->CORE_DLL_CONFIG_2)
+			   & ~(0xFF << 10)) | (mclk_freq << 10)),
+			   host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+		}
 		/* wait for 5us before enabling DLL clock */
 		udelay(5);
 	}
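
Note: the switch pins known bus clocks to the cycle counts the DLL
expects, because plain integer division truncates: 200 MHz over the
19.2 MHz TCXO gives 10 * 4 = 40 instead of the intended 42. A sketch of
the computation, assuming the usual 19.2 MHz TCXO_FREQ:

    #include <stdint.h>

    #define TCXO_FREQ 19200000u /* assumed 19.2 MHz reference */

    static uint32_t mclk_freq_for(uint32_t clk_hz, int fll_cycle_cnt)
    {
    	uint32_t freq;

    	switch (clk_hz) {
    	case 202000000: case 201500000: case 200000000:
    		freq = 42; /* division below would truncate to 40 */
    		break;
    	case 192000000:
    		freq = 40;
    		break;
    	default:
    		freq = (clk_hz / TCXO_FREQ) * 4;
    	}
    	if (fll_cycle_cnt) /* FLL counts twice as many cycles */
    		freq *= 2;
    	return freq;
    }
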
@@ -794,7 +829,6 @@
 			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 
 	if (msm_host->use_updated_dll_reset) {
-		msm_cm_dll_set_freq(host);
 		/* Enable the DLL clock */
 		writel_relaxed((readl_relaxed(host->ioaddr +
 				msm_host_offset->CORE_DLL_CONFIG_2)
@@ -804,11 +838,38 @@
 
 	/*
 	 * Configure DLL user control register to enable DLL status
-	 * This setting is applicable to SDCC v5.1 onwards only
+	 * This setting is applicable to SDCC v5.1 onwards only.
+	 *
+	 * Configure the Tassadar DLL (only applicable to 7FF projects).
 	 */
 	if (msm_host->need_dll_user_ctl) {
-		writel_relaxed(DLL_USR_CTL_POR_VAL | ENABLE_DLL_LOCK_STATUS,
-			host->ioaddr + msm_host_offset->CORE_DLL_USR_CTL);
+		if (msm_host->dll_hsr) {
+			writel_relaxed(msm_host->dll_hsr->dll_usr_ctl,
+					host->ioaddr +
+					msm_host_offset->CORE_DLL_USR_CTL);
+			writel_relaxed(msm_host->dll_hsr->dll_config_3,
+					host->ioaddr +
+					msm_host_offset->CORE_DLL_CONFIG_3);
+		} else {
+			writel_relaxed(DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
+					ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL,
+					host->ioaddr +
+					msm_host_offset->CORE_DLL_USR_CTL);
+
+			writel_relaxed(DLL_CONFIG_3_POR_VAL, host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_3);
+		}
+	}
+
+	/*
+	 * Update only the lower byte of DLL_CONFIG with the HSR value,
+	 * since these are static settings.
+	 */
+	if (msm_host->dll_hsr) {
+		writel_relaxed(((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG) & (~0xff)) |
+			(msm_host->dll_hsr->dll_config & 0xff)),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 	}
 
 	/* Set DLL_EN bit to 1. */
@@ -976,7 +1038,7 @@
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
 	const struct sdhci_msm_offset *msm_host_offset =
 					msm_host->offset;
-	u32 dll_status, ddr_config;
+	u32 dll_status;
 	int ret = 0;
 
 	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
@@ -987,16 +1049,16 @@
 	 */
 	if (msm_host->pdata->rclk_wa) {
 		writel_relaxed(msm_host->pdata->ddr_config, host->ioaddr +
-			msm_host_offset->CORE_DDR_CONFIG_2);
-	} else if (msm_host->rclk_delay_fix) {
-		writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
-			msm_host_offset->CORE_DDR_CONFIG_2);
-	} else {
-		ddr_config = DDR_CONFIG_POR_VAL &
-				~DDR_CONFIG_PRG_RCLK_DLY_MASK;
-		ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
-		writel_relaxed(ddr_config, host->ioaddr +
 			msm_host_offset->CORE_DDR_CONFIG);
+	} else if (msm_host->dll_hsr && msm_host->dll_hsr->ddr_config) {
+		writel_relaxed(msm_host->dll_hsr->ddr_config, host->ioaddr +
+			msm_host_offset->CORE_DDR_CONFIG);
+	} else if (msm_host->rclk_delay_fix) {
+		writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
+			msm_host_offset->CORE_DDR_CONFIG);
+	} else {
+		writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
+			msm_host_offset->CORE_DDR_CONFIG_OLD);
 	}
 
 	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
@@ -1065,7 +1127,7 @@
 	/*
 	 * Reset the tuning block.
 	 */
-	ret = msm_init_cm_dll(host);
+	ret = msm_init_cm_dll(host, DLL_INIT_NORMAL);
 	if (ret)
 		goto out;
 
@@ -1092,7 +1154,7 @@
 	 * Retuning in HS400 (DDR mode) will fail, just reset the
 	 * tuning block and restore the saved tuning phase.
 	 */
-	ret = msm_init_cm_dll(host);
+	ret = msm_init_cm_dll(host, DLL_INIT_NORMAL);
 	if (ret)
 		goto out;
 
@@ -1100,12 +1162,13 @@
 	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
 	if (ret)
 		goto out;
-
-	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
-	writel_relaxed((readl_relaxed(host->ioaddr +
+	if (msm_host->dll_hsr->dll_config & CORE_CMD_DAT_TRACK_SEL) {
+		/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
+		writel_relaxed((readl_relaxed(host->ioaddr +
 				msm_host_offset->CORE_DLL_CONFIG)
 				| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
 				msm_host_offset->CORE_DLL_CONFIG);
+	}
 
 	if (msm_host->use_cdclp533)
 		/* Calibrate CDCLP533 DLL HW */
@@ -1215,7 +1278,7 @@
 	tuned_phase_cnt = 0;
 
 	/* first of all reset the tuning block */
-	rc = msm_init_cm_dll(host);
+	rc = msm_init_cm_dll(host, DLL_INIT_NORMAL);
 	if (rc)
 		goto kfree;
 
@@ -1965,6 +2028,31 @@
 }
 #endif
 
+static int sdhci_msm_dt_parse_hsr_info(struct device *dev,
+		struct sdhci_msm_host *msm_host)
+{
+	u32 *dll_hsr_table = NULL;
+	int dll_hsr_table_len, dll_hsr_reg_count;
+	int ret = 0;
+
+	if (sdhci_msm_dt_get_array(dev, "qcom,dll-hsr-list",
+			&dll_hsr_table, &dll_hsr_table_len, 0))
+		goto skip_hsr;
+
+	dll_hsr_reg_count = sizeof(struct sdhci_msm_dll_hsr) / sizeof(u32);
+	if (dll_hsr_table_len != dll_hsr_reg_count) {
+		dev_err(dev, "Number of HSR entries does not match\n");
+		ret = -EINVAL;
+	} else {
+		msm_host->dll_hsr = (struct sdhci_msm_dll_hsr *)dll_hsr_table;
+	}
+
+skip_hsr:
+	if (!msm_host->dll_hsr)
+		dev_info(dev, "Failed to get dll hsr settings from dt\n");
+	return ret;
+}
+
 /* Parse platform data */
 static
 struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
@@ -2142,6 +2231,9 @@
 	if (!of_property_read_u32(np, "qcom,ddr-config", &pdata->ddr_config))
 		pdata->rclk_wa = true;
 
+	if (sdhci_msm_dt_parse_hsr_info(dev, msm_host))
+		goto out;
+
 	/*
 	 * rclk_wa is not required if soc version is mentioned and
 	 * is not base version.
@@ -2426,7 +2518,7 @@
 						  *vreg, int uA_load)
 {
 	int ret = 0;
-	
+
 	/*
 	 * regulators that do not support regulator_set_voltage also
 	 * do not support regulator_set_optimum_mode
@@ -3140,6 +3232,15 @@
 		sdhci_readl(host, SDHCI_CAPABILITIES_1);
 	msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
 		msm_host_offset->CORE_TESTBUS_CONFIG);
+	msm_host->regs_restore.dll_config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
+	msm_host->regs_restore.dll_config2 = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG_2);
+	msm_host->regs_restore.dll_config3 = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG_3);
+	msm_host->regs_restore.dll_usr_ctl = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_USR_CTL);
+
 	msm_host->regs_restore.is_valid = true;
 
 	pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
@@ -3155,6 +3260,7 @@
 	u8 irq_status;
 	const struct sdhci_msm_offset *msm_host_offset =
 					msm_host->offset;
+	struct mmc_ios ios = host->mmc->ios;
 
 	if (!msm_host->regs_restore.is_supported ||
 		!msm_host->regs_restore.is_valid)
@@ -3206,6 +3312,24 @@
 	writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
 			host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
 
+	if (((ios.timing == MMC_TIMING_MMC_HS400) ||
+			(ios.timing == MMC_TIMING_MMC_HS200) ||
+			(ios.timing == MMC_TIMING_UHS_SDR104))
+			&& (ios.clock > CORE_FREQ_100MHZ)) {
+		writel_relaxed(msm_host->regs_restore.dll_config2,
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+		writel_relaxed(msm_host->regs_restore.dll_config3,
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_3);
+		writel_relaxed(msm_host->regs_restore.dll_usr_ctl,
+			host->ioaddr + msm_host_offset->CORE_DLL_USR_CTL);
+		writel_relaxed(msm_host->regs_restore.dll_config &
+			~(CORE_DLL_RST | CORE_DLL_PDN),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+		msm_init_cm_dll(host, DLL_INIT_FROM_CX_COLLAPSE_EXIT);
+		msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+	}
+
 	pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
 		mmc_hostname(host->mmc), __func__,
 		readl_relaxed(host->ioaddr +
@@ -3512,9 +3636,9 @@
 			 * DLL input clock
 			 */
 			writel_relaxed(((readl_relaxed(host->ioaddr +
-				msm_host_offset->CORE_DDR_CONFIG))
+				msm_host_offset->CORE_DLL_CONFIG_3))
 				| RCLK_TOGGLE), host->ioaddr +
-				msm_host_offset->CORE_DDR_CONFIG);
+				msm_host_offset->CORE_DLL_CONFIG_3);
 			/* ensure above write as toggling same bit quickly */
 			wmb();
 			udelay(2);
@@ -3523,9 +3647,9 @@
 			 * DLL input clock
 			 */
 			writel_relaxed(((readl_relaxed(host->ioaddr +
-				msm_host_offset->CORE_DDR_CONFIG))
+				msm_host_offset->CORE_DLL_CONFIG_3))
 				& ~RCLK_TOGGLE), host->ioaddr +
-				msm_host_offset->CORE_DDR_CONFIG);
+				msm_host_offset->CORE_DLL_CONFIG_3);
 		}
 		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
 			/*
@@ -3768,24 +3892,34 @@
 			msm_host_offset->CORE_MCI_FIFO_CNT),
 		sdhci_msm_readl_relaxed(host,
 			msm_host_offset->CORE_MCI_STATUS));
-	pr_info("DLL cfg:  0x%08x | DLL sts:  0x%08x | SDCC ver: 0x%08x\n",
-		readl_relaxed(host->ioaddr +
-			msm_host_offset->CORE_DLL_CONFIG),
+	pr_info("DLL sts: 0x%08x | DLL cfg:  0x%08x | DLL cfg2: 0x%08x\n",
 		readl_relaxed(host->ioaddr +
 			msm_host_offset->CORE_DLL_STATUS),
-		sdhci_msm_readl_relaxed(host,
-			msm_host_offset->CORE_MCI_VERSION));
-	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
 		readl_relaxed(host->ioaddr +
-			msm_host_offset->CORE_VENDOR_SPEC),
+			msm_host_offset->CORE_DLL_CONFIG),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_DLL_CONFIG_2));
+	pr_info("DLL cfg3: 0x%08x | DLL usr ctl:  0x%08x | DDR cfg: 0x%08x\n",
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_3),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_USR_CTL),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_DDR_CONFIG));
+	pr_info("SDCC ver: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_MCI_VERSION),
 		readl_relaxed(host->ioaddr +
 			msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
 		readl_relaxed(host->ioaddr +
 			msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
-	pr_info("Vndr func2: 0x%08x\n",
+	pr_info("Vndr func: 0x%08x | Vndr func2 : 0x%08x Vndr func3: 0x%08x\n",
 		readl_relaxed(host->ioaddr +
-			msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
-
+			msm_host_offset->CORE_VENDOR_SPEC),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_FUNC2),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC3));
 	/*
 	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
 	 * of CORE_TESTBUS_CONFIG register.
@@ -4577,7 +4711,7 @@
 	 * starts coming.
 	 */
 	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
-				(minor == 0x49) || (minor >= 0x6b)))
+			(minor == 0x49) || (minor == 0x4D) || (minor >= 0x6b)))
 		msm_host->use_14lpp_dll = true;
 
 	/* Fake 3.0V support for SDIO devices which requires such voltage */
@@ -4951,7 +5085,7 @@
 	sdhci_set_default_hw_caps(msm_host, host);
 
 	/*
-	 * Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
+	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
 	 * be used as required later on.
 	 */
 	writel_relaxed((readl_relaxed(host->ioaddr +
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 27c4f23..641d3c9 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -195,6 +195,23 @@
 	u32 hc_3c_3e;
 	u32 hc_caps_1;
 	u32 testbus_config;
+	u32 dll_config;
+	u32 dll_config2;
+	u32 dll_config3;
+	u32 dll_usr_ctl;
+};
+
+/*
+ * DLL registers which need to be programmed with HSR settings.
+ * Add any new register only at the end and don't change the
+ * sequence.
+ */
+struct sdhci_msm_dll_hsr {
+	u32 dll_config;
+	u32 dll_config_2;
+	u32 dll_config_3;
+	u32 dll_usr_ctl;
+	u32 ddr_config;
 };
 
 struct sdhci_msm_debug_data {
@@ -258,6 +275,7 @@
 	int soc_min_rev;
 	struct workqueue_struct *pm_qos_wq;
 	bool need_dll_user_ctl;
+	struct sdhci_msm_dll_hsr *dll_hsr;
 };
 
 extern char *saved_command_line;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 01c8b90..fb20e82 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1092,8 +1092,7 @@
 	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
 		((mrq->cmd && mrq->cmd->error) ||
 		 (mrq->sbc && mrq->sbc->error) ||
-		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
-				(mrq->data->stop && mrq->data->stop->error))) ||
+		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
 		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
 }
 
@@ -1147,6 +1146,16 @@
 
 	MMC_TRACE(host->mmc, "%s: 0x24=0x%08x\n", __func__,
 		sdhci_readl(host, SDHCI_PRESENT_STATE));
+	/*
+	 * The controller needs a reset of internal state machines upon error
+	 * conditions.
+	 */
+	if (data->error) {
+		if (!host->cmd || host->cmd == data_cmd)
+			sdhci_do_reset(host, SDHCI_RESET_CMD);
+		sdhci_do_reset(host, SDHCI_RESET_DATA);
+	}
+
 	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
 	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
 		sdhci_adma_table_post(host, data);
@@ -1171,17 +1180,6 @@
 	if (data->stop &&
 	    (data->error ||
 	     !data->mrq->sbc)) {
-
-		/*
-		 * The controller needs a reset of internal state machines
-		 * upon error conditions.
-		 */
-		if (data->error) {
-			if (!host->cmd || host->cmd == data_cmd)
-				sdhci_do_reset(host, SDHCI_RESET_CMD);
-			sdhci_do_reset(host, SDHCI_RESET_DATA);
-		}
-
 		/*
 		 * 'cap_cmd_during_tfr' request must not use the command line
 		 * after mmc_command_done() has been called. It is upper layer's
@@ -2987,7 +2985,7 @@
  *                                                                           *
 \*****************************************************************************/
 
-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
 {
 	u16 auto_cmd_status;
 	if (!host->cmd) {
@@ -3036,16 +3034,9 @@
 				host->cmd->error = -EILSEQ;
 		}
 
+		/* Treat data command CRC error the same as data CRC error */
+
 		/*
-		 * If this command initiates a data phase and a response
-		 * CRC error is signalled, the card can start transferring
-		 * data - the card may have received the command without
-		 * error.  We must not terminate the mmc_request early.
-		 *
-		 * If the card did not receive the command or returned an
-		 * error which prevented it sending data, the data phase
-		 * will time out.
-		 *
 		 * Even in case of cmd INDEX OR ENDBIT error we
 		 * handle it the same way.
 		 */
@@ -3053,6 +3044,7 @@
 		    (((intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
 		     SDHCI_INT_CRC) || (host->cmd->error == -EILSEQ))) {
 			host->cmd = NULL;
+			*intmask_p |= SDHCI_INT_DATA_CRC;
 			return;
 		}
 
@@ -3435,7 +3427,7 @@
 			if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
 				(host->clock <= 400000))
 				udelay(40);
-			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
 		}
 
 		if (intmask & SDHCI_INT_DATA_MASK) {
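
Note: the new intmask_p out-parameter lets the command-IRQ path convert
a response CRC error on a data command into a synthetic data-CRC
interrupt instead of terminating the request early, since the card may
have accepted the command and already be transferring. A simplified
sketch (bit values illustrative, not the SDHCI register layout):

    #define INT_CRC      (1u << 17)
    #define INT_DATA_CRC (1u << 21)

    static void cmd_irq_sketch(unsigned int intmask,
    			       unsigned int *intmask_p,
    			       int cmd_has_data)
    {
    	if (cmd_has_data && (intmask & INT_CRC))
    		*intmask_p |= INT_DATA_CRC; /* let the data path decide */
    }
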
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 7005676..0fc1f73 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -675,7 +675,7 @@
 	return false;
 }
 
-static void tmio_mmc_sdio_irq(int irq, void *devid)
+static bool tmio_mmc_sdio_irq(int irq, void *devid)
 {
 	struct tmio_mmc_host *host = devid;
 	struct mmc_host *mmc = host->mmc;
@@ -684,7 +684,7 @@
 	unsigned int sdio_status;
 
 	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
-		return;
+		return false;
 
 	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
 	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
@@ -697,6 +697,8 @@
 
 	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
 		mmc_signal_sdio_irq(mmc);
+
+	return ireg;
 }
 
 irqreturn_t tmio_mmc_irq(int irq, void *devid)
@@ -718,9 +720,10 @@
 	if (__tmio_mmc_sdcard_irq(host, ireg, status))
 		return IRQ_HANDLED;
 
-	tmio_mmc_sdio_irq(irq, devid);
+	if (tmio_mmc_sdio_irq(irq, devid))
+		return IRQ_HANDLED;
 
-	return IRQ_HANDLED;
+	return IRQ_NONE;
 }
 EXPORT_SYMBOL(tmio_mmc_irq);
 
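
Note: on a shared interrupt line a handler must return IRQ_NONE when it
finds nothing pending; unconditionally returning IRQ_HANDLED defeats
the kernel's spurious-interrupt detection, which is what the new bool
return from tmio_mmc_sdio_irq() feeds. Sketched:

    #include <stdbool.h>

    enum irqreturn { IRQ_NONE, IRQ_HANDLED };

    static enum irqreturn irq_sketch(bool sdcard_pending,
    				     bool sdio_pending)
    {
    	if (sdcard_pending || sdio_pending)
    		return IRQ_HANDLED;
    	return IRQ_NONE; /* lets the core detect a stuck line */
    }
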
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 24a3433..9316972 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3134,8 +3134,12 @@
 		return NOTIFY_DONE;
 
 	if (event_dev->flags & IFF_MASTER) {
+		int ret;
+
 		netdev_dbg(event_dev, "IFF_MASTER\n");
-		return bond_master_netdev_event(event, event_dev);
+		ret = bond_master_netdev_event(event, event_dev);
+		if (ret != NOTIFY_DONE)
+			return ret;
 	}
 
 	if (event_dev->flags & IFF_SLAVE) {
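
Note: a bond can itself be enslaved to another bond, so a netdev may
carry both IFF_MASTER and IFF_SLAVE. The change stops returning
unconditionally after the master handler, so the slave handler still
runs when the master handler did not consume the event. A minimal
sketch of that dispatch (stub handlers stand in for the real ones):

    #define NOTIFY_DONE 0

    static int master_handler(void) { return NOTIFY_DONE; } /* stub */
    static int slave_handler(void)  { return NOTIFY_DONE; } /* stub */

    static int netdev_event_sketch(int is_master, int is_slave)
    {
    	if (is_master) {
    		int ret = master_handler();

    		if (ret != NOTIFY_DONE)
    			return ret; /* consumed: stop here */
    	}
    	if (is_slave)
    		return slave_handler();
    	return NOTIFY_DONE;
    }
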
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 473da3b..258cb39 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1065,13 +1065,6 @@
 {
 	netdev_info(bond->dev, "Setting arp_validate to %s (%llu)\n",
 		    newval->string, newval->value);
-
-	if (bond->dev->flags & IFF_UP) {
-		if (!newval->value)
-			bond->recv_probe = NULL;
-		else if (bond->params.arp_interval)
-			bond->recv_probe = bond_arp_rcv;
-	}
 	bond->params.arp_validate = newval->value;
 
 	return 0;
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 7d16c51..641a532 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@
 
 static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
 {
-	return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+	return sprintf(buf, "%*phC\n",
+		       slave->dev->addr_len,
+		       slave->perm_hwaddr);
 }
 static SLAVE_ATTR_RO(perm_hwaddr);
 
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 7f64a76..ebfbaf8 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -630,22 +630,6 @@
 	qca8k_port_set_status(priv, port, 1);
 }
 
-static int
-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
-	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
-	return mdiobus_read(priv->bus, phy, regnum);
-}
-
-static int
-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
-{
-	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
-	return mdiobus_write(priv->bus, phy, regnum, val);
-}
-
 static void
 qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
 {
@@ -961,8 +945,6 @@
 	.setup			= qca8k_setup,
 	.adjust_link            = qca8k_adjust_link,
 	.get_strings		= qca8k_get_strings,
-	.phy_read		= qca8k_phy_read,
-	.phy_write		= qca8k_phy_write,
 	.get_ethtool_stats	= qca8k_get_ethtool_stats,
 	.get_sset_count		= qca8k_get_sset_count,
 	.get_eee		= qca8k_get_eee,
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index b928390..0fdc9ad 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -156,8 +156,6 @@
 #define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
 #define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))
 
-#define memcmp_withio(a, b, c)	memcmp((a), (void *)(b), (c))
-
 /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
 static void slow_sane_get_8390_hdr(struct net_device *dev,
 				   struct e8390_pkt_hdr *hdr, int ring_page);
@@ -237,19 +235,26 @@
 
 static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
 {
-	unsigned long outdata = 0xA5A0B5B0;
-	unsigned long indata =  0x00000000;
+	u32 outdata = 0xA5A0B5B0;
+	u32 indata = 0;
+
 	/* Try writing 32 bits */
-	memcpy_toio(membase, &outdata, 4);
-	/* Now compare them */
-	if (memcmp_withio(&outdata, membase, 4) == 0)
+	nubus_writel(outdata, membase);
+	/* Now read it back */
+	indata = nubus_readl(membase);
+	if (outdata == indata)
 		return ACCESS_32;
+
+	outdata = 0xC5C0D5D0;
+	indata = 0;
+
 	/* Write 16 bit output */
 	word_memcpy_tocard(membase, &outdata, 4);
 	/* Now read it back */
 	word_memcpy_fromcard(&indata, membase, 4);
 	if (outdata == indata)
 		return ACCESS_16;
+
 	return ACCESS_UNKNOWN;
 }
 
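
Note: the rewritten probe reads back through the same access width it
wrote, and the 16-bit retry uses a fresh test pattern so a value left
over from the failed 32-bit attempt cannot produce a false match. The
control flow as a standalone sketch (bus I/O stubbed with a variable):

    #include <stdint.h>

    static uint32_t shadow; /* stands in for card memory */
    static void bus_write(uint32_t v) { shadow = v; }
    static uint32_t bus_read(void) { return shadow; }

    static int probe_width(void)
    {
    	bus_write(0xA5A0B5B0);
    	if (bus_read() == 0xA5A0B5B0)
    		return 32;
    	bus_write(0xC5C0D5D0); /* fresh pattern for the 16-bit try */
    	if (bus_read() == 0xC5C0D5D0)
    		return 16;
    	return 0; /* ACCESS_UNKNOWN */
    }
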
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 2ff4658..097a0bf 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1338,13 +1338,11 @@
 {
 	struct net_device *netdev;
 	struct atl2_adapter *adapter;
-	static int cards_found;
+	static int cards_found = 0;
 	unsigned long mmio_start;
 	int mmio_len;
 	int err;
 
-	cards_found = 0;
-
 	err = pci_enable_device(pdev);
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 53a506b..95874c1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -104,6 +104,10 @@
 
 	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
 	reg = rxchk_readl(priv, RXCHK_CONTROL);
+	/* Clear L2 header checks, which would prevent BPDUs
+	 * from being received.
+	 */
+	reg &= ~RXCHK_L2_HDR_DIS;
 	if (priv->rx_chk_en)
 		reg |= RXCHK_EN;
 	else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 737f0f6..620a470 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -959,6 +959,8 @@
 	tpa_info = &rxr->rx_tpa[agg_id];
 
 	if (unlikely(cons != rxr->rx_next_cons)) {
+		netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
+			    cons, rxr->rx_next_cons);
 		bnxt_sched_reset(bp, rxr);
 		return;
 	}
@@ -1377,14 +1379,16 @@
 	}
 
 	cons = rxcmp->rx_cmp_opaque;
-	rx_buf = &rxr->rx_buf_ring[cons];
-	data = rx_buf->data;
 	if (unlikely(cons != rxr->rx_next_cons)) {
 		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
 
+		netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+			    cons, rxr->rx_next_cons);
 		bnxt_sched_reset(bp, rxr);
 		return rc1;
 	}
+	rx_buf = &rxr->rx_buf_ring[cons];
+	data = rx_buf->data;
 	prefetch(data);
 
 	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
@@ -1400,11 +1404,17 @@
 
 	rx_buf->data = NULL;
 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
+
 		bnxt_reuse_rx_data(rxr, cons, data);
 		if (agg_bufs)
 			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
 
 		rc = -EIO;
+		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
+			netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+			bnxt_sched_reset(bp, rxr);
+		}
 		goto next_rx;
 	}
 
@@ -5944,8 +5954,15 @@
 
 skip_uc:
 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	if (rc && vnic->mc_list_count) {
+		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+			    rc);
+		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+		vnic->mc_list_count = 0;
+		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	}
 	if (rc)
-		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
 			   rc);
 
 	return rc;
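
Note: if programming the exact multicast filter list fails (for
example, more addresses than the firmware has slots for), the driver
now falls back to all-multicast mode rather than silently dropping
multicast traffic. The fallback shape, sketched with illustrative
names:

    struct nic_sketch {
    	unsigned int rx_mask;
    	int mc_count;
    };

    #define RX_MASK_ALL_MCAST 0x4u

    static int hw_set_rx_mask(struct nic_sketch *n) { return 0; } /* stub */

    static int set_rx_mode_sketch(struct nic_sketch *n)
    {
    	int rc = hw_set_rx_mask(n);

    	if (rc && n->mc_count) {
    		/* exact filters failed: accept all multicast instead */
    		n->rx_mask |= RX_MASK_ALL_MCAST;
    		n->mc_count = 0;
    		rc = hw_set_rx_mask(n);
    	}
    	return rc;
    }
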
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index da142f6..18ddd24 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -999,7 +999,7 @@
 	case NIC_MBOX_MSG_CFG_DONE:
 		/* Last message of VF config msg sequence */
 		nic_enable_vf(nic, vf, true);
-		goto unlock;
+		break;
 	case NIC_MBOX_MSG_SHUTDOWN:
 		/* First msg in VF teardown sequence */
 		if (vf >= nic->num_vf_en)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index c75d4ea9..71f228c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -162,6 +162,17 @@
 	return 1;
 }
 
+static void nicvf_send_cfg_done(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+	if (nicvf_send_msg_to_pf(nic, &mbx)) {
+		netdev_err(nic->netdev,
+			   "PF didn't respond to CFG DONE msg\n");
+	}
+}
+
 static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
 {
 	if (bgx->rx)
@@ -1178,7 +1189,6 @@
 	struct nicvf *nic = netdev_priv(netdev);
 	struct queue_set *qs = nic->qs;
 	struct nicvf_cq_poll *cq_poll = NULL;
-	union nic_mbx mbx = {};
 
 	netif_carrier_off(netdev);
 
@@ -1267,8 +1277,7 @@
 		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
 
 	/* Send VF config done msg to PF */
-	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
-	nicvf_write_to_mbx(nic, &mbx);
+	nicvf_send_cfg_done(nic);
 
 	return 0;
 cleanup:
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 89acf7b..b73d9ba 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -120,7 +120,7 @@
 
 	for (i = 0; i < enic->intr_count; i++) {
 		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
-		    (enic->msix[i].affinity_mask &&
+		    (cpumask_available(enic->msix[i].affinity_mask) &&
 		     !cpumask_empty(enic->msix[i].affinity_mask)))
 			continue;
 		if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
@@ -149,7 +149,7 @@
 	for (i = 0; i < enic->intr_count; i++) {
 		if (enic_is_err_intr(enic, i)		||
 		    enic_is_notify_intr(enic, i)	||
-		    !enic->msix[i].affinity_mask	||
+		    !cpumask_available(enic->msix[i].affinity_mask) ||
 		    cpumask_empty(enic->msix[i].affinity_mask))
 			continue;
 		err = irq_set_affinity_hint(enic->msix_entry[i].vector,
@@ -162,7 +162,7 @@
 	for (i = 0; i < enic->wq_count; i++) {
 		int wq_intr = enic_msix_wq_intr(enic, i);
 
-		if (enic->msix[wq_intr].affinity_mask &&
+		if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
 		    !cpumask_empty(enic->msix[wq_intr].affinity_mask))
 			netif_set_xps_queue(enic->netdev,
 					    enic->msix[wq_intr].affinity_mask,
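
Note: cpumask_available() is needed because cpumask_var_t is an array
type when CONFIG_CPUMASK_OFFSTACK=n, so the old pointer-style test
could never fail. A toy illustration of why (stand-in type, not the
kernel's definition):

    typedef unsigned long mask_array_t[4]; /* offstack=n: inline storage */

    static int always_true(void)
    {
    	mask_array_t m;

    	/* an array decays to a non-NULL pointer, so this is always 1;
    	 * cpumask_available() expresses the real intent instead */
    	return &m[0] != 0;
    }
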
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 812a968..6aa4b50 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -250,14 +250,12 @@
 		return -EINVAL;
 	}
 
+	if (netif_running(netdev))
+		return -EBUSY;
+
 	ug_info->bdRingLenRx[queue] = ring->rx_pending;
 	ug_info->bdRingLenTx[queue] = ring->tx_pending;
 
-	if (netif_running(netdev)) {
-		/* FIXME: restart automatically */
-		netdev_info(netdev, "Please re-open the interface\n");
-	}
-
 	return ret;
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 06bc863..66e7a5f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -146,7 +146,6 @@
 /* free desc along with its attached buffer */
 static void hnae_free_desc(struct hnae_ring *ring)
 {
-	hnae_free_buffers(ring);
 	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
 			 ring->desc_num * sizeof(ring->desc[0]),
 			 ring_to_dma_dir(ring));
@@ -179,6 +178,9 @@
 /* fini ring, also free the buffer for the ring */
 static void hnae_fini_ring(struct hnae_ring *ring)
 {
+	if (is_rx_ring(ring))
+		hnae_free_buffers(ring);
+
 	hnae_free_desc(ring);
 	kfree(ring->desc_cb);
 	ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 5bb019d..551b2a9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2820,6 +2820,7 @@
 	dsaf_dev = dev_get_drvdata(&pdev->dev);
 	if (!dsaf_dev) {
 		dev_err(&pdev->dev, "dsaf_dev is NULL\n");
+		put_device(&pdev->dev);
 		return -ENODEV;
 	}
 
@@ -2827,6 +2828,7 @@
 	if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
 		dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
 			dsaf_dev->ae_dev.name);
+		put_device(&pdev->dev);
 		return -ENODEV;
 	}
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ad8681c..24a8159 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -28,9 +28,6 @@
 
 #define SERVICE_TIMER_HZ (1 * HZ)
 
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
 #define RCB_IRQ_NOT_INITED 0
 #define RCB_IRQ_INITED 1
 #define HNS_BUFFER_SIZE_2048 2048
@@ -375,8 +372,6 @@
 	wmb(); /* commit all data before submit */
 	assert(skb->queue_mapping < priv->ae_handle->q_num);
 	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
-	ring->stats.tx_pkts++;
-	ring->stats.tx_bytes += skb->len;
 
 	return NETDEV_TX_OK;
 
@@ -916,6 +911,9 @@
 		/* issue prefetch for next Tx descriptor */
 		prefetch(&ring->desc_cb[ring->next_to_clean]);
 	}
+	/* update tx ring statistics. */
+	ring->stats.tx_pkts += pkts;
+	ring->stats.tx_bytes += bytes;
 
 	NETIF_TX_UNLOCK(ndev);
 
@@ -1821,7 +1819,7 @@
 			hns_nic_tx_fini_pro_v2;
 
 		netif_napi_add(priv->netdev, &rd->napi,
-			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
 		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
 	}
 	for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -1834,7 +1832,7 @@
 			hns_nic_rx_fini_pro_v2;
 
 		netif_napi_add(priv->netdev, &rd->napi,
-			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
 		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
 	}
 
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index bd719e2..2dd17e0 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3184,6 +3184,7 @@
 
 	if (ehea_add_adapter_mr(adapter)) {
 		pr_err("creating MR failed\n");
+		of_node_put(eth_dn);
 		return -EIO;
 	}
 
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6855b33..8bbedfc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2121,7 +2121,7 @@
 	if (strlen(netdev->name) < (IFNAMSIZ - 5))
 		snprintf(adapter->rx_ring->name,
 			 sizeof(adapter->rx_ring->name) - 1,
-			 "%s-rx-0", netdev->name);
+			 "%.14s-rx-0", netdev->name);
 	else
 		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
 	err = request_irq(adapter->msix_entries[vector].vector,
@@ -2137,7 +2137,7 @@
 	if (strlen(netdev->name) < (IFNAMSIZ - 5))
 		snprintf(adapter->tx_ring->name,
 			 sizeof(adapter->tx_ring->name) - 1,
-			 "%s-tx-0", netdev->name);
+			 "%.14s-tx-0", netdev->name);
 	else
 		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
 	err = request_irq(adapter->msix_entries[vector].vector,
@@ -5291,8 +5291,13 @@
 			/* 8000ES2LAN requires a Rx packet buffer work-around
 			 * on link down event; reset the controller to flush
 			 * the Rx packet buffer.
+			 *
+			 * If the link is lost the controller stops DMA, but
+			 * if there is queued Tx work it cannot be done.  So
+			 * reset the controller to flush the Tx packet buffers.
 			 */
-			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+			if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
+			    e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
 				adapter->flags |= FLAG_RESTART_NOW;
 			else
 				pm_schedule_suspend(netdev->dev.parent,
@@ -5315,14 +5320,6 @@
 	adapter->gotc_old = adapter->stats.gotc;
 	spin_unlock(&adapter->stats64_lock);
 
-	/* If the link is lost the controller stops DMA, but
-	 * if there is queued Tx work it cannot be done.  So
-	 * reset the controller to flush the Tx packet buffers.
-	 */
-	if (!netif_carrier_ok(netdev) &&
-	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
-		adapter->flags |= FLAG_RESTART_NOW;
-
 	/* If reset is necessary, do it outside of interrupt context. */
 	if (adapter->flags & FLAG_RESTART_NOW) {
 		schedule_work(&adapter->reset_task);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 2aae6f8..a526637 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -58,6 +58,8 @@
 	/* create driver workqueue */
 	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
 					  fm10k_driver_name);
+	if (!fm10k_workqueue)
+		return -ENOMEM;
 
 	fm10k_dbg_init();
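alloc_workqueue() returns NULL on allocation failure, and the old code went on
to use the pointer unconditionally. A self-contained sketch of the checked
pattern in a module init path (hypothetical module, not fm10k itself):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/* alloc_workqueue() can fail; propagate -ENOMEM instead of oopsing */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");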
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2688180..f948eec 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -193,6 +193,8 @@
 /* enable link status from external LINK_0 and LINK_1 pins */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000  /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
 #define E1000_CTRL_SDP0_DIR 0x00400000  /* SDP0 Data direction */
 #define E1000_CTRL_SDP1_DIR 0x00800000  /* SDP1 Data direction */
 #define E1000_CTRL_RST      0x04000000  /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 82e48e3..7956176 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7548,9 +7548,7 @@
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, rctl, status;
 	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
-	int retval = 0;
-#endif
+	bool wake;
 
 	rtnl_lock();
 	netif_device_detach(netdev);
@@ -7563,14 +7561,6 @@
 	igb_clear_interrupt_scheme(adapter);
 	rtnl_unlock();
 
-#ifdef CONFIG_PM
-	if (!runtime) {
-		retval = pci_save_state(pdev);
-		if (retval)
-			return retval;
-	}
-#endif
-
 	status = rd32(E1000_STATUS);
 	if (status & E1000_STATUS_LU)
 		wufc &= ~E1000_WUFC_LNKC;
@@ -7587,10 +7577,6 @@
 		}
 
 		ctrl = rd32(E1000_CTRL);
-		/* advertise wake from D3Cold */
-		#define E1000_CTRL_ADVD3WUC 0x00100000
-		/* phy power management enable */
-		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
 		ctrl |= E1000_CTRL_ADVD3WUC;
 		wr32(E1000_CTRL, ctrl);
 
@@ -7604,12 +7590,15 @@
 		wr32(E1000_WUFC, 0);
 	}
 
-	*enable_wake = wufc || adapter->en_mng_pt;
-	if (!*enable_wake)
+	wake = wufc || adapter->en_mng_pt;
+	if (!wake)
 		igb_power_down_link(adapter);
 	else
 		igb_power_up_link(adapter);
 
+	if (enable_wake)
+		*enable_wake = wake;
+
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
@@ -7624,22 +7613,7 @@
 #ifdef CONFIG_PM_SLEEP
 static int igb_suspend(struct device *dev)
 {
-	int retval;
-	bool wake;
-	struct pci_dev *pdev = to_pci_dev(dev);
-
-	retval = __igb_shutdown(pdev, &wake, 0);
-	if (retval)
-		return retval;
-
-	if (wake) {
-		pci_prepare_to_sleep(pdev);
-	} else {
-		pci_wake_from_d3(pdev, false);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
-	return 0;
+	return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }
 #endif /* CONFIG_PM_SLEEP */
 
@@ -7707,22 +7681,7 @@
 
 static int igb_runtime_suspend(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	int retval;
-	bool wake;
-
-	retval = __igb_shutdown(pdev, &wake, 1);
-	if (retval)
-		return retval;
-
-	if (wake) {
-		pci_prepare_to_sleep(pdev);
-	} else {
-		pci_wake_from_d3(pdev, false);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
-	return 0;
+	return __igb_shutdown(to_pci_dev(dev), NULL, 1);
 }
 
 static int igb_runtime_resume(struct device *dev)
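The suspend and runtime-suspend wrappers above collapse into bare calls
because __igb_shutdown() now treats its enable_wake argument as optional: the
wake decision is computed locally and only copied out when the caller passes a
non-NULL pointer. A sketch of that optional out-parameter shape, with
hypothetical names:

#include <stdbool.h>
#include <stddef.h>

static int shutdown_helper(bool *enable_wake, bool runtime)
{
	bool wake = !runtime;		/* stand-in for the real wake logic */

	if (enable_wake)
		*enable_wake = wake;	/* written only when requested */
	return 0;
}

static int suspend_path(void)
{
	return shutdown_helper(NULL, false);	/* wake state not needed */
}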
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5b12022..526d07e 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2886,7 +2886,7 @@
 
 	ret = mv643xx_eth_shared_of_probe(pdev);
 	if (ret)
-		return ret;
+		goto err_put_clk;
 	pd = dev_get_platdata(&pdev->dev);
 
 	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2894,6 +2894,11 @@
 	infer_hw_params(msp);
 
 	return 0;
+
+err_put_clk:
+	if (!IS_ERR(msp->clk))
+		clk_disable_unprepare(msp->clk);
+	return ret;
 }
 
 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
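The fix routes the mv643xx_eth_shared_of_probe() failure through an unwind
label so the clock prepared earlier in probe is released. A minimal sketch of
the goto-unwind convention it follows (illustrative, assuming the clk was
already prepared and enabled):

#include <linux/clk.h>
#include <linux/err.h>

static int example_probe_tail(struct clk *clk, int (*next_step)(void))
{
	int ret;

	ret = next_step();
	if (ret)
		goto err_put_clk;	/* don't leak the enabled clock */

	return 0;

err_put_clk:
	if (!IS_ERR(clk))
		clk_disable_unprepare(clk);
	return ret;
}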
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c92ffdf..d98b874 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2050,7 +2050,7 @@
 			if (unlikely(!skb))
 				goto err_drop_frame_ret_pool;
 
-			dma_sync_single_range_for_cpu(dev->dev.parent,
+			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
 			                              rx_desc->buf_phys_addr,
 			                              MVNETA_MH_SIZE + NET_SKB_PAD,
 			                              rx_bytes,
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index dae9dcf..e528338 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2633,6 +2633,8 @@
 	if (!priv->cmd.context)
 		return -ENOMEM;
 
+	if (mlx4_is_mfunc(dev))
+		mutex_lock(&priv->cmd.slave_cmd_mutex);
 	down_write(&priv->cmd.switch_sem);
 	for (i = 0; i < priv->cmd.max_cmds; ++i) {
 		priv->cmd.context[i].token = i;
@@ -2658,6 +2660,8 @@
 	down(&priv->cmd.poll_sem);
 	priv->cmd.use_events = 1;
 	up_write(&priv->cmd.switch_sem);
+	if (mlx4_is_mfunc(dev))
+		mutex_unlock(&priv->cmd.slave_cmd_mutex);
 
 	return err;
 }
@@ -2670,6 +2674,8 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
 
+	if (mlx4_is_mfunc(dev))
+		mutex_lock(&priv->cmd.slave_cmd_mutex);
 	down_write(&priv->cmd.switch_sem);
 	priv->cmd.use_events = 0;
 
@@ -2677,9 +2683,12 @@
 		down(&priv->cmd.event_sem);
 
 	kfree(priv->cmd.context);
+	priv->cmd.context = NULL;
 
 	up(&priv->cmd.poll_sem);
 	up_write(&priv->cmd.switch_sem);
+	if (mlx4_is_mfunc(dev))
+		mutex_unlock(&priv->cmd.slave_cmd_mutex);
 }
 
 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 0710b36..cb18632 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1490,7 +1490,7 @@
 	rule.port = port;
 	rule.qpn = qpn;
 	INIT_LIST_HEAD(&rule.list);
-	mlx4_err(dev, "going promisc on %x\n", port);
+	mlx4_info(dev, "going promisc on %x\n", port);
 
 	return  mlx4_flow_attach(dev, &rule, regid_p);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 9d1a7d5..7994430 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2677,13 +2677,13 @@
 	int total_pages;
 	int total_mem;
 	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+	int tot;
 
 	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
 	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
 	total_mem = sq_size + rq_size;
-	total_pages =
-		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
-				   page_shift);
+	tot = (total_mem + (page_offset << 6)) >> page_shift;
+	total_pages = !tot ? 1 : roundup_pow_of_two(tot);
 
 	return total_pages;
 }
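roundup_pow_of_two(0) is undefined in the kernel (it expands to
1UL << fls_long(n - 1), i.e. a shift by the word size when n is 0), so the
hunk clamps the page count to at least 1 before rounding. The guard in
isolation:

#include <linux/log2.h>

static unsigned long example_total_pages(unsigned long tot)
{
	/* clamp to one page: roundup_pow_of_two(0) would be undefined */
	return tot ? roundup_pow_of_two(tot) : 1;
}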
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 029e856..dc809c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@
 	if (err)
 		return err;
 
+	mutex_lock(&mdev->mlx5e_res.td.list_lock);
 	list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
 	return 0;
 }
@@ -53,8 +55,10 @@
 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
 		       struct mlx5e_tir *tir)
 {
+	mutex_lock(&mdev->mlx5e_res.td.list_lock);
 	mlx5_core_destroy_tir(mdev, tir->tirn);
 	list_del(&tir->list);
+	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 }
 
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@
 	}
 
 	INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+	mutex_init(&mdev->mlx5e_res.td.list_lock);
 
 	return 0;
 
@@ -151,6 +156,7 @@
 
 	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
+	mutex_lock(&mdev->mlx5e_res.td.list_lock);
 	list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
 		err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
 		if (err)
@@ -159,6 +165,7 @@
 
 out:
 	kvfree(in);
+	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
 	return err;
 }
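All three touchpoints above put tirs_list manipulation and traversal under a
new list_lock mutex, so a TIR being created or destroyed cannot race with the
self-loopback modify loop. The locking discipline, sketched with illustrative
names:

#include <linux/list.h>
#include <linux/mutex.h>

struct example_tir {
	u32 tirn;
	struct list_head list;
};

static LIST_HEAD(example_tirs_list);
static DEFINE_MUTEX(example_list_lock);	/* serializes add/del/walk */

static void example_tir_add(struct example_tir *tir)
{
	mutex_lock(&example_list_lock);
	list_add(&tir->list, &example_tirs_list);
	mutex_unlock(&example_list_lock);
}

static void example_tir_del(struct example_tir *tir)
{
	mutex_lock(&example_list_lock);
	list_del(&tir->list);
	mutex_unlock(&example_list_lock);
}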
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d5e8ac8..54872f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1365,7 +1365,7 @@
 		break;
 	case MLX5_MODULE_ID_SFP:
 		modinfo->type       = ETH_MODULE_SFF_8472;
-		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
 		break;
 	default:
 		netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index da9246f..d1a3a35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -92,8 +92,7 @@
 		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
 	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
 	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
 				     in, nic_vport_context);
 
@@ -121,8 +120,7 @@
 	MLX5_SET(modify_esw_vport_context_in, in, opcode,
 		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
 	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+	MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
 	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 43d7c83..0bad09d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -368,10 +368,6 @@
 		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
 	i2c_addr = MLX5_I2C_ADDR_LOW;
-	if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
-		i2c_addr = MLX5_I2C_ADDR_HIGH;
-		offset -= MLX5_EEPROM_PAGE_LENGTH;
-	}
 
 	MLX5_SET(mcia_reg, in, l, 0);
 	MLX5_SET(mcia_reg, in, module, module_num);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 22a5916e..e3ed70a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1565,7 +1565,7 @@
 	int i;
 
 	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
-		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
+		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
 			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
 		*p += ETH_GSTRING_LEN;
 	}
@@ -1576,7 +1576,7 @@
 	int i;
 
 	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
-		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
+		snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
 			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
 		*p += ETH_GSTRING_LEN;
 	}
@@ -2059,11 +2059,11 @@
 	if (err)
 		return err;
 
+	mlxsw_sp_port->link.autoneg = autoneg;
+
 	if (!netif_running(dev))
 		return 0;
 
-	mlxsw_sp_port->link.autoneg = autoneg;
-
 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 1edc973..7377dca 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -547,9 +547,8 @@
 		/* set dma read address */
 		ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
 
-		/* start the packet dma process, and set auto-dequeue rx */
-		ks8851_wrreg16(ks, KS_RXQCR,
-			       ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+		/* start DMA access */
+		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
 
 		if (rxlen > 4) {
 			unsigned int rxalign;
@@ -580,7 +579,8 @@
 			}
 		}
 
-		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+		/* end DMA access and dequeue packet */
+		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
 	}
 }
 
@@ -797,6 +797,15 @@
 static int ks8851_net_open(struct net_device *dev)
 {
 	struct ks8851_net *ks = netdev_priv(dev);
+	int ret;
+
+	ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
+				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				   dev->name, ks);
+	if (ret < 0) {
+		netdev_err(dev, "failed to get irq\n");
+		return ret;
+	}
 
 	/* lock the card, even if we may not actually be doing anything
 	 * else at the moment */
@@ -861,6 +870,7 @@
 	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
 
 	mutex_unlock(&ks->lock);
+	mii_check_link(&ks->mii);
 	return 0;
 }
 
@@ -911,6 +921,8 @@
 		dev_kfree_skb(txb);
 	}
 
+	free_irq(dev->irq, ks);
+
 	return 0;
 }
 
@@ -1516,6 +1528,7 @@
 
 	spi_set_drvdata(spi, ks);
 
+	netif_carrier_off(ks->netdev);
 	ndev->if_port = IF_PORT_100BASET;
 	ndev->netdev_ops = &ks8851_netdev_ops;
 	ndev->irq = spi->irq;
@@ -1542,14 +1555,6 @@
 	ks8851_read_selftest(ks);
 	ks8851_init_mac(ks);
 
-	ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
-				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-				   ndev->name, ks);
-	if (ret < 0) {
-		dev_err(&spi->dev, "failed to get irq\n");
-		goto err_irq;
-	}
-
 	ret = register_netdev(ndev);
 	if (ret) {
 		dev_err(&spi->dev, "failed to register network device\n");
@@ -1562,14 +1567,10 @@
 
 	return 0;
 
-
 err_netdev:
-	free_irq(ndev->irq, ks);
-
-err_irq:
+err_id:
 	if (gpio_is_valid(gpio))
 		gpio_set_value(gpio, 0);
-err_id:
 	regulator_disable(ks->vdd_reg);
 err_reg:
 	regulator_disable(ks->vdd_io);
@@ -1587,7 +1588,6 @@
 		dev_info(&spi->dev, "remove\n");
 
 	unregister_netdev(priv->netdev);
-	free_irq(spi->irq, priv);
 	if (gpio_is_valid(priv->gpio))
 		gpio_set_value(priv->gpio, 0);
 	regulator_disable(priv->vdd_reg);
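The ks8851 changes move the interrupt's lifetime from probe/remove into the
open/stop path, so no IRQ can fire against a half-initialized or downed
interface, and the probe error unwind shrinks accordingly. A skeleton of that
ndo_open/ndo_stop pairing (illustrative, not the driver's code):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t example_irq(int irq, void *priv)
{
	/* handle the chip interrupt */
	return IRQ_HANDLED;
}

static int example_open(struct net_device *dev)
{
	int ret;

	ret = request_threaded_irq(dev->irq, NULL, example_irq,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   dev->name, netdev_priv(dev));
	if (ret < 0)
		return ret;

	netif_start_queue(dev);
	return 0;
}

static int example_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	free_irq(dev->irq, netdev_priv(dev));	/* mirrors the request above */
	return 0;
}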
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
index f8df530..7308777 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
@@ -756,15 +756,10 @@
 
 static int
 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
-	      enum alu_op alu_op, bool skip)
+	      enum alu_op alu_op)
 {
 	const struct bpf_insn *insn = &meta->insn;
 
-	if (skip) {
-		meta->skip = true;
-		return 0;
-	}
-
 	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
 	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
 
@@ -1017,7 +1012,7 @@
 
 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
+	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
 }
 
 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1027,7 +1022,7 @@
 
 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
 }
 
 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1037,7 +1032,7 @@
 
 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
 }
 
 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1047,7 +1042,7 @@
 
 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
 }
 
 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1057,7 +1052,7 @@
 
 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
 }
 
 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 0a2318c..63ebc49 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1038,6 +1038,8 @@
 
 	for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
 		skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+		if (!skb)
+			break;
 		qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
 		skb_put(skb, QLCNIC_ILB_PKT_SIZE);
 		adapter->ahw->diag_cnt = 0;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 71836a7..480883a 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -457,7 +457,7 @@
 		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
 
 	/* Set FIFO size */
-	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
 
 	/* Timestamp enable */
 	ravb_write(ndev, TCCR_TFEN, TCCR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index ce97e52..77dc584 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -205,6 +205,11 @@
 	if (unlikely(rdes0 & RDES0_OWN))
 		return dma_own;
 
+	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+		stats->rx_length_errors++;
+		return discard_frame;
+	}
+
 	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
 		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
 			x->rx_desc++;
@@ -235,9 +240,10 @@
 	 * It doesn't match with the information reported into the databook.
 	 * At any rate, we need to understand if the CSUM hw computation is ok
 	 * and report this info to the upper layers. */
-	ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
-				 !!(rdes0 & RDES0_FRAME_TYPE),
-				 !!(rdes0 & ERDES0_RX_MAC_ADDR));
+	if (likely(ret == good_frame))
+		ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+					 !!(rdes0 & RDES0_FRAME_TYPE),
+					 !!(rdes0 & ERDES0_RX_MAC_ADDR));
 
 	if (unlikely(rdes0 & RDES0_DRIBBLING))
 		x->dribbling_bit++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index fd78406..01f8f2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -95,8 +95,6 @@
 		return dma_own;
 
 	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
-		pr_warn("%s: Oversized frame spanned multiple buffers\n",
-			__func__);
 		stats->rx_length_errors++;
 		return discard_frame;
 	}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 20a2b01..2c04a07 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1747,11 +1747,6 @@
 	if (ret < 0)
 		pr_warn("%s: failed debugFS registration\n", __func__);
 #endif
-	/* Start the ball rolling... */
-	pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
-	priv->hw->dma->start_tx(priv->ioaddr);
-	priv->hw->dma->start_rx(priv->ioaddr);
-
 	/* Dump DMA/MAC registers */
 	if (netif_msg_hw(priv)) {
 		priv->hw->mac->dump_regs(priv->hw);
@@ -1779,6 +1774,11 @@
 	if (priv->tso)
 		priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
 
+	/* Start the ball rolling... */
+	pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
+	priv->hw->dma->start_tx(priv->ioaddr);
+	priv->hw->dma->start_rx(priv->ioaddr);
+
 	return 0;
 }
 
@@ -1796,8 +1796,6 @@
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;
 
-	stmmac_check_ether_addr(priv);
-
 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
 	    priv->hw->pcs != STMMAC_PCS_TBI &&
 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -2931,6 +2929,20 @@
 	return ret;
 }
 
+static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	int ret = 0;
+
+	ret = eth_mac_addr(ndev, addr);
+	if (ret)
+		return ret;
+
+	priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
+
+	return ret;
+}
+
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *stmmac_fs_dir;
 
@@ -3137,7 +3149,7 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = stmmac_poll_controller,
 #endif
-	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_set_mac_address = stmmac_set_mac_address,
 };
 
 /**
@@ -3341,6 +3353,8 @@
 	if (ret)
 		goto error_hw_init;
 
+	stmmac_check_ether_addr(priv);
+
 	ndev->netdev_ops = &stmmac_netdev_ops;
 
 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
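The stmmac changes split into three pieces: DMA is started only after the rest
of the hardware setup (including TSO enable) completes, the MAC address sanity
check runs once at probe time instead of on every open, and a real
ndo_set_mac_address hook pushes address changes into the MAC registers rather
than only updating dev_addr. A sketch of that last hook, where write_hw_addr()
stands in for the device-specific register write:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void write_hw_addr(struct net_device *ndev)
{
	/* device-specific: program ndev->dev_addr into the MAC filter */
}

static int example_set_mac_address(struct net_device *ndev, void *addr)
{
	int ret;

	ret = eth_mac_addr(ndev, addr);	/* validates and copies to dev_addr */
	if (ret)
		return ret;

	write_hw_addr(ndev);		/* keep the hardware in sync */
	return 0;
}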
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index d543298..ff24524 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3122,12 +3122,16 @@
 
 	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
 				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
-	if (ret)
+	if (ret) {
+		of_node_put(interfaces);
 		return ret;
+	}
 
 	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
-	if (ret)
+	if (ret) {
+		of_node_put(interfaces);
 		return ret;
+	}
 
 	/* Create network interfaces */
 	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index c688d68..a8afc92 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1548,12 +1548,14 @@
 	ret = of_address_to_resource(np, 0, &dmares);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to get DMA resource\n");
+		of_node_put(np);
 		goto free_netdev;
 	}
 	lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
 	if (IS_ERR(lp->dma_regs)) {
 		dev_err(&pdev->dev, "could not map DMA regs\n");
 		ret = PTR_ERR(lp->dma_regs);
+		of_node_put(np);
 		goto free_netdev;
 	}
 	lp->rx_irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4a2609c..72fb55c 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -463,7 +463,12 @@
 	struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
 	int err = 0;
 
-	if (data && data[IFLA_IPVLAN_MODE]) {
+	if (!data)
+		return 0;
+	if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (data[IFLA_IPVLAN_MODE]) {
 		u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
 
 		err = ipvlan_set_port_mode(port, nmode);
@@ -530,6 +535,8 @@
 		struct ipvl_dev *tmp = netdev_priv(phy_dev);
 
 		phy_dev = tmp->phy_dev;
+		if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
 	} else if (!netif_is_ipvlan_port(phy_dev)) {
 		err = ipvlan_port_create(phy_dev);
 		if (err < 0)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5203523..77357b0 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1429,9 +1429,10 @@
 
 static void marvell_get_strings(struct phy_device *phydev, u8 *data)
 {
+	int count = marvell_get_sset_count(phydev);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
+	for (i = 0; i < count; i++) {
 		memcpy(data + i * ETH_GSTRING_LEN,
 		       marvell_hw_stats[i].string, ETH_GSTRING_LEN);
 	}
@@ -1470,9 +1471,10 @@
 static void marvell_get_stats(struct phy_device *phydev,
 			      struct ethtool_stats *stats, u64 *data)
 {
+	int count = marvell_get_sset_count(phydev);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
+	for (i = 0; i < count; i++)
 		data[i] = marvell_get_stat(phydev, i);
 }
 
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 09deef4..a9bbdce 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -319,7 +319,6 @@
 	err = device_register(&bus->dev);
 	if (err) {
 		pr_err("mii_bus %s failed to register\n", bus->id);
-		put_device(&bus->dev);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 1e2d4f1..45df036 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -162,6 +162,14 @@
 };
 MODULE_DEVICE_TABLE(spi, ks8995_id);
 
+static const struct of_device_id ks8895_spi_of_match[] = {
+	{ .compatible = "micrel,ks8995" },
+	{ .compatible = "micrel,ksz8864" },
+	{ .compatible = "micrel,ksz8795" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
+
 static inline u8 get_chip_id(u8 val)
 {
 	return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
@@ -529,6 +537,7 @@
 static struct spi_driver ks8995_driver = {
 	.driver = {
 		.name	    = "spi-ks8995",
+		.of_match_table = of_match_ptr(ks8895_spi_of_match),
 	},
 	.probe	  = ks8995_probe,
 	.remove	  = ks8995_remove,
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index b5edc7f..685e875 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -610,12 +610,20 @@
 
 static int __init deflate_init(void)
 {
-        int answer = ppp_register_compressor(&ppp_deflate);
-        if (answer == 0)
-                printk(KERN_INFO
-		       "PPP Deflate Compression module registered\n");
-	ppp_register_compressor(&ppp_deflate_draft);
-        return answer;
+	int rc;
+
+	rc = ppp_register_compressor(&ppp_deflate);
+	if (rc)
+		return rc;
+
+	rc = ppp_register_compressor(&ppp_deflate_draft);
+	if (rc) {
+		ppp_unregister_compressor(&ppp_deflate);
+		return rc;
+	}
+
+	pr_info("PPP Deflate Compression module registered\n");
+	return 0;
 }
 
 static void __exit deflate_cleanup(void)
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 2e3f932..a90b7af 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -536,6 +536,7 @@
 		pppox_unbind_sock(sk);
 	}
 	skb_queue_purge(&sk->sk_receive_queue);
+	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
 }
 
 static int pptp_create(struct net *net, struct socket *sock, int kern)
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index cfd81eb..ddceed3 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -153,7 +153,7 @@
 void
 slhc_free(struct slcompress *comp)
 {
-	if ( comp == NULLSLCOMPR )
+	if ( IS_ERR_OR_NULL(comp) )
 		return;
 
 	if ( comp->tstate != NULLSLSTATE )
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 375b681..3eb6d48 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1163,6 +1163,12 @@
 		return -EINVAL;
 	}
 
+	if (netdev_has_upper_dev(dev, port_dev)) {
+		netdev_err(dev, "Device %s is already an upper device of the team interface\n",
+			   portname);
+		return -EBUSY;
+	}
+
 	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
 	    vlan_uses_dev(dev)) {
 		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
@@ -1251,6 +1257,23 @@
 		goto err_option_port_add;
 	}
 
+	/* set promiscuity level to new slave */
+	if (dev->flags & IFF_PROMISC) {
+		err = dev_set_promiscuity(port_dev, 1);
+		if (err)
+			goto err_set_slave_promisc;
+	}
+
+	/* set allmulti level to new slave */
+	if (dev->flags & IFF_ALLMULTI) {
+		err = dev_set_allmulti(port_dev, 1);
+		if (err) {
+			if (dev->flags & IFF_PROMISC)
+				dev_set_promiscuity(port_dev, -1);
+			goto err_set_slave_promisc;
+		}
+	}
+
 	netif_addr_lock_bh(dev);
 	dev_uc_sync_multiple(port_dev, dev);
 	dev_mc_sync_multiple(port_dev, dev);
@@ -1267,6 +1290,9 @@
 
 	return 0;
 
+err_set_slave_promisc:
+	__team_option_inst_del_port(team, port);
+
 err_option_port_add:
 	team_upper_dev_unlink(team, port);
 
@@ -1312,6 +1338,12 @@
 
 	team_port_disable(team, port);
 	list_del_rcu(&port->list);
+
+	if (dev->flags & IFF_PROMISC)
+		dev_set_promiscuity(port_dev, -1);
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(port_dev, -1);
+
 	team_upper_dev_unlink(team, port);
 	netdev_rx_handler_unregister(port_dev);
 	team_port_disable_netpoll(port);
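IFF_PROMISC and IFF_ALLMULTI on a lower device are reference counts, so the
team fix increments them when a port is enslaved, decrements them on unlink,
and unwinds the promisc increment if the allmulti one fails. The counting rule
in isolation (illustrative, error handling condensed):

#include <linux/netdevice.h>

static int example_port_add(struct net_device *team_dev,
			    struct net_device *port_dev)
{
	int err;

	if (team_dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(port_dev, 1);
		if (err)
			return err;
	}
	if (team_dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(port_dev, 1);
		if (err) {
			if (team_dev->flags & IFF_PROMISC)
				dev_set_promiscuity(port_dev, -1); /* unwind */
			return err;
		}
	}
	return 0;	/* matching -1 decrements belong in port removal */
}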
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 03ba0f1c..9942054 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1194,9 +1194,6 @@
 	u32 rxhash;
 	ssize_t n;
 
-	if (!(tun->dev->flags & IFF_UP))
-		return -EIO;
-
 	if (!(tun->flags & IFF_NO_PI)) {
 		if (len < sizeof(pi))
 			return -EINVAL;
@@ -1273,9 +1270,11 @@
 		err = skb_copy_datagram_from_iter(skb, 0, from, len);
 
 	if (err) {
+		err = -EFAULT;
+drop:
 		this_cpu_inc(tun->pcpu_stats->rx_dropped);
 		kfree_skb(skb);
-		return -EFAULT;
+		return err;
 	}
 
 	err = virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun));
@@ -1331,7 +1330,16 @@
 	skb_probe_transport_header(skb, 0);
 
 	rxhash = skb_get_hash(skb);
+
+	rcu_read_lock();
+	if (unlikely(!(tun->dev->flags & IFF_UP))) {
+		err = -EIO;
+		rcu_read_unlock();
+		goto drop;
+	}
+
 	netif_rx_ni(skb);
+	rcu_read_unlock();
 
 	stats = get_cpu_ptr(tun->pcpu_stats);
 	u64_stats_update_begin(&stats->syncp);
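The tun fix moves the IFF_UP test from the top of the receive path to just
before the skb is handed to the stack, under rcu_read_lock(), closing the
window where the device could be unregistered between check and delivery. The
shape of that check (a sketch, with an illustrative function name):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

static int example_deliver(struct net_device *dev, struct sk_buff *skb)
{
	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP))) {
		rcu_read_unlock();
		kfree_skb(skb);		/* count as a drop, not a delivery */
		return -EIO;
	}
	netif_rx_ni(skb);		/* delivered while provably up */
	rcu_read_unlock();
	return 0;
}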
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index f1f8227..01f95d1 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -148,6 +148,7 @@
 	u8 bulk_in;
 	u8 bulk_out;
 	struct delayed_work carrier_work;
+	bool confirmed_pairing;
 };
 
 static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
@@ -259,7 +260,7 @@
 
 	dev->net->stats.rx_packets++;
 	dev->net->stats.rx_bytes += len;
-
+	dev->confirmed_pairing = true;
 	netif_rx(skb);
 	ipheth_rx_submit(dev, GFP_ATOMIC);
 }
@@ -280,14 +281,24 @@
 		dev_err(&dev->intf->dev, "%s: urb status: %d\n",
 		__func__, status);
 
-	netif_wake_queue(dev->net);
+	if (status == 0)
+		netif_wake_queue(dev->net);
+	else
+		/* on URB error, trigger immediate poll */
+		schedule_delayed_work(&dev->carrier_work, 0);
 }
 
 static int ipheth_carrier_set(struct ipheth_device *dev)
 {
-	struct usb_device *udev = dev->udev;
+	struct usb_device *udev;
 	int retval;
 
+	if (!dev)
+		return 0;
+	if (!dev->confirmed_pairing)
+		return 0;
+
+	udev = dev->udev;
 	retval = usb_control_msg(udev,
 			usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
 			IPHETH_CMD_CARRIER_CHECK, /* request */
@@ -302,11 +313,14 @@
 		return retval;
 	}
 
-	if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
+	if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
 		netif_carrier_on(dev->net);
-	else
+		if (dev->tx_urb->status != -EINPROGRESS)
+			netif_wake_queue(dev->net);
+	} else {
 		netif_carrier_off(dev->net);
-
+		netif_stop_queue(dev->net);
+	}
 	return 0;
 }
 
@@ -386,7 +400,6 @@
 		return retval;
 
 	schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
-	netif_start_queue(net);
 	return retval;
 }
 
@@ -489,7 +502,7 @@
 	dev->udev = udev;
 	dev->net = netdev;
 	dev->intf = intf;
-
+	dev->confirmed_pairing = false;
 	/* Set up endpoints */
 	hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
 	if (hintf == NULL) {
@@ -540,7 +553,9 @@
 		retval = -EIO;
 		goto err_register_netdev;
 	}
-
+	/* carrier down and transmit queues stopped until packet from device */
+	netif_carrier_off(netdev);
+	netif_tx_stop_all_queues(netdev);
 	dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
 	return 0;
 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 134eb18..d51ad14 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -890,13 +890,14 @@
 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
 	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
 	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
+	{QMI_FIXED_INTF(0x2020, 0x2031, 4)},	/* Olicard 600 */
 	{QMI_FIXED_INTF(0x2020, 0x2033, 4)},	/* BroadMobi BM806U */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */
-	{QMI_FIXED_INTF(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC7304/MC7354 */
-	{QMI_FIXED_INTF(0x1199, 0x68c0, 10)},	/* Sierra Wireless MC7304/MC7354 */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC7304/MC7354, WP76xx */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
 	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 373713f..b6ee0c1 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1380,6 +1380,14 @@
 		goto drop;
 	}
 
+	rcu_read_lock();
+
+	if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
+		rcu_read_unlock();
+		atomic_long_inc(&vxlan->dev->rx_dropped);
+		goto drop;
+	}
+
 	stats = this_cpu_ptr(vxlan->dev->tstats);
 	u64_stats_update_begin(&stats->syncp);
 	stats->rx_packets++;
@@ -1387,6 +1395,9 @@
 	u64_stats_update_end(&stats->syncp);
 
 	gro_cells_receive(&vxlan->gro_cells, skb);
+
+	rcu_read_unlock();
+
 	return 0;
 
 drop:
@@ -2362,6 +2373,8 @@
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 
+	gro_cells_destroy(&vxlan->gro_cells);
+
 	vxlan_fdb_delete_default(vxlan);
 
 	free_percpu(dev->tstats);
@@ -3112,7 +3125,6 @@
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 
-	gro_cells_destroy(&vxlan->gro_cells);
 	list_del(&vxlan->next);
 	unregister_netdevice_queue(dev, head);
 }
@@ -3363,10 +3375,8 @@
 		/* If vxlan->dev is in the same netns, it has already been added
 		 * to the list by the previous loop.
 		 */
-		if (!net_eq(dev_net(vxlan->dev), net)) {
-			gro_cells_destroy(&vxlan->gro_cells);
+		if (!net_eq(dev_net(vxlan->dev), net))
 			unregister_netdevice_queue(vxlan->dev, &list);
-		}
 	}
 
 	unregister_netdevice_many(&list);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 21aec5c..bbfe7be 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -4277,7 +4277,7 @@
 							    rate_code[i],
 							    type);
 			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
-			strncat(tpc_value, buff, strlen(buff));
+			strlcat(tpc_value, buff, sizeof(tpc_value));
 		}
 		tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
 		tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 30ced6d..4883bc6 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1424,6 +1424,12 @@
 	u8 *buf, *dpos;
 	const u8 *spos;
 
+	if (!ies1)
+		ies1_len = 0;
+
+	if (!ies2)
+		ies2_len = 0;
+
 	if (ies1_len == 0 && ies2_len == 0) {
 		*merged_ies = NULL;
 		*merged_len = 0;
@@ -1433,17 +1439,19 @@
 	buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
-	memcpy(buf, ies1, ies1_len);
+	if (ies1)
+		memcpy(buf, ies1, ies1_len);
 	dpos = buf + ies1_len;
 	spos = ies2;
-	while (spos + 1 < ies2 + ies2_len) {
+	while (spos && (spos + 1 < ies2 + ies2_len)) {
 		/* IE tag at offset 0, length at offset 1 */
 		u16 ielen = 2 + spos[1];
 
 		if (spos + ielen > ies2 + ies2_len)
 			break;
 		if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
-		    !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) {
+		    (!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len,
+						     spos, ielen))) {
 			memcpy(dpos, spos, ielen);
 			dpos += ielen;
 		}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index e58a50d..c21f8bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -475,7 +475,7 @@
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	struct list_head local_empty;
-	int pending = atomic_xchg(&rba->req_pending, 0);
+	int pending = atomic_read(&rba->req_pending);
 
 	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
 
@@ -530,11 +530,13 @@
 			i++;
 		}
 
+		atomic_dec(&rba->req_pending);
 		pending--;
+
 		if (!pending) {
-			pending = atomic_xchg(&rba->req_pending, 0);
+			pending = atomic_read(&rba->req_pending);
 			IWL_DEBUG_RX(trans,
-				     "Pending allocation requests = %d\n",
+				     "Got more pending allocation requests = %d\n",
 				     pending);
 		}
 
@@ -546,12 +548,15 @@
 		spin_unlock(&rba->lock);
 
 		atomic_inc(&rba->req_ready);
 	}
 
 	spin_lock(&rba->lock);
 	/* return unused rbds to the allocator empty list */
 	list_splice_tail(&local_empty, &rba->rbd_empty);
 	spin_unlock(&rba->lock);
+
+	IWL_DEBUG_RX(trans, "%s, exit.\n", __func__);
 }
 
 /*
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index 27a4906..57ad564 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -554,7 +554,7 @@
 	err = pci_enable_device(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
-		return err;
+		goto err_put;
 	}
 
 	mem_addr = pci_resource_start(pdev, 0);
@@ -639,6 +639,7 @@
 	pci_release_regions(pdev);
  err_disable_dev:
 	pci_disable_device(pdev);
+err_put:
 	pci_dev_put(pdev);
 	return err;
 }
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 780acf2..e9ec1da 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3167,7 +3167,7 @@
 			goto out_err;
 		}
 
-		genlmsg_reply(skb, info);
+		res = genlmsg_reply(skb, info);
 		break;
 	}
 
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index e0ade40..4b53920 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -433,8 +433,6 @@
 			  skb_tail_pointer(skb),
 			  MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
 
-	cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
-
 	lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
 		cardp->rx_urb);
 	ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
index 662d127..57b503a 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
@@ -17,7 +17,7 @@
 
 struct mt7601u_dev;
 
-#define MT7601U_EE_MAX_VER			0x0c
+#define MT7601U_EE_MAX_VER			0x0d
 #define MT7601U_EEPROM_SIZE			256
 
 #define MT7601U_DEFAULT_TX_POWER		6
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index f68d492..822833a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -669,7 +669,6 @@
 	CONFIG_CHANNEL_HT40,
 	CONFIG_POWERSAVING,
 	CONFIG_HT_DISABLED,
-	CONFIG_QOS_DISABLED,
 	CONFIG_MONITORING,
 
 	/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 987c7c4..55036ce 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -666,19 +666,9 @@
 			rt2x00dev->intf_associated--;
 
 		rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
-		clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
 	}
 
 	/*
-	 * Check for access point which do not support 802.11e . We have to
-	 * generate data frames sequence number in S/W for such AP, because
-	 * of H/W bug.
-	 */
-	if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
-		set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
-	/*
 	 * When the erp information has changed, we should perform
 	 * additional configuration steps. For all other changes we are done.
 	 */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 68b620b..9a15a69 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -201,15 +201,18 @@
 	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
 		/*
 		 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
-		 * seqno on retransmited data (non-QOS) frames. To workaround
-		 * the problem let's generate seqno in software if QOS is
-		 * disabled.
+		 * seqno on retransmitted data (non-QOS) and management frames.
+		 * To work around the problem let's generate seqno in software,
+		 * except for beacons, which are transmitted periodically by
+		 * H/W and hence must have their seqno assigned by hardware.
 		 */
-		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
-			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
-		else
+		if (ieee80211_is_beacon(hdr->frame_control)) {
+			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 			/* H/W will generate sequence number */
 			return;
+		}
+
+		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 	}
 
 	/*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index f8be0bd..0741280 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1703,6 +1703,7 @@
 					rtlhal->oem_id = RT_CID_819X_LENOVO;
 					break;
 				}
+				break;
 			case 0x1025:
 				rtlhal->oem_id = RT_CID_819X_ACER;
 				break;
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index d3fbe33..a13f08f 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -75,7 +75,6 @@
 	atomic_inc(&handle->thread_done);
 	rsi_set_event(&handle->event);
 
-	wait_for_completion(&handle->completion);
 	return kthread_stop(handle->task);
 }
 
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index c5492d7..b35f470 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -84,8 +84,11 @@
 
 	frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
 		req->ie_len);
-	if (!frame.skb)
+	if (!frame.skb) {
+		mutex_unlock(&priv->conf_mutex);
+		up(&priv->scan.lock);
 		return -ENOMEM;
+	}
 
 	if (req->ie_len)
 		memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 5438975..17d32ce 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1058,8 +1058,11 @@
 		goto out;
 
 	ret = wl12xx_fetch_firmware(wl, plt);
-	if (ret < 0)
-		goto out;
+	if (ret < 0) {
+		kfree(wl->fw_status);
+		kfree(wl->raw_fw_status);
+		kfree(wl->tx_res_if);
+	}
 
 out:
 	return ret;
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 52f34b3..d255147 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -365,7 +365,6 @@
 	wiphy->bands[NL80211_BAND_5GHZ] = &band_5ghz;
 	wiphy->bands[NL80211_BAND_60GHZ] = NULL;
 
-	wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
 	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 
 	priv = wiphy_priv(wiphy);
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index c2840e4..850e755 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1074,6 +1074,12 @@
 };
 MODULE_DEVICE_TABLE(spi, st95hf_id);
 
+static const struct of_device_id st95hf_spi_of_match[] = {
+	{ .compatible = "st,st95hf" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, st95hf_spi_of_match);
+
 static int st95hf_probe(struct spi_device *nfc_spi_dev)
 {
 	int ret;
@@ -1260,6 +1266,7 @@
 	.driver = {
 		.name = "st95hf",
 		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(st95hf_spi_of_match),
 	},
 	.id_table = st95hf_id,
 	.probe = st95hf_probe,
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 97dd292..5d2c766 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -190,14 +190,15 @@
 		return NULL;
 
 	nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
-	if (nd_btt->id < 0) {
-		kfree(nd_btt);
-		return NULL;
-	}
+	if (nd_btt->id < 0)
+		goto out_nd_btt;
 
 	nd_btt->lbasize = lbasize;
-	if (uuid)
+	if (uuid) {
 		uuid = kmemdup(uuid, 16, GFP_KERNEL);
+		if (!uuid)
+			goto out_put_id;
+	}
 	nd_btt->uuid = uuid;
 	dev = &nd_btt->dev;
 	dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
@@ -212,6 +213,13 @@
 		return NULL;
 	}
 	return dev;
+
+out_put_id:
+	ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+
+out_nd_btt:
+	kfree(nd_btt);
+	return NULL;
 }
 
 struct device *nd_btt_create(struct nd_region *nd_region)
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index d8d189d..66a089d 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -492,7 +492,7 @@
 
 static int __pmem_label_update(struct nd_region *nd_region,
 		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
-		int pos)
+		int pos, unsigned long flags)
 {
 	u64 cookie = nd_region_interleave_set_cookie(nd_region);
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -530,7 +530,7 @@
 	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
 	if (nspm->alt_name)
 		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
-	nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
+	nd_label->flags = __cpu_to_le32(flags);
 	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
 	nd_label->position = __cpu_to_le16(pos);
 	nd_label->isetcookie = __cpu_to_le64(cookie);
@@ -922,13 +922,13 @@
 int nd_pmem_namespace_label_update(struct nd_region *nd_region,
 		struct nd_namespace_pmem *nspm, resource_size_t size)
 {
-	int i;
+	int i, rc;
 
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 		struct resource *res;
-		int rc, count = 0;
+		int count = 0;
 
 		if (size == 0) {
 			rc = del_labels(nd_mapping, nspm->uuid);
@@ -946,7 +946,20 @@
 		if (rc < 0)
 			return rc;
 
-		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
+		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
+				NSLABEL_FLAG_UPDATING);
+		if (rc)
+			return rc;
+	}
+
+	if (size == 0)
+		return 0;
+
+	/* Clear the UPDATING flag per UEFI 2.7 expectations */
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+
+		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
 		if (rc)
 			return rc;
 	}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 74257ac..cf4a90b 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -138,6 +138,7 @@
 bool pmem_should_map_pages(struct device *dev)
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_namespace_common *ndns = to_ndns(dev);
 	struct nd_namespace_io *nsio;
 
 	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
@@ -149,6 +150,9 @@
 	if (is_nd_pfn(dev) || is_nd_btt(dev))
 		return false;
 
+	if (ndns->force_raw)
+		return false;
+
 	nsio = to_nd_namespace_io(dev);
 	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
 				IORESOURCE_SYSTEM_RAM,
@@ -2024,9 +2028,12 @@
 	if (!nsblk->uuid)
 		goto blk_err;
 	memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
-	if (name[0])
+	if (name[0]) {
 		nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
 				GFP_KERNEL);
+		if (!nsblk->alt_name)
+			goto blk_err;
+	}
 	res = nsblk_add_resource(nd_region, ndd, nsblk,
 			__le64_to_cpu(nd_label->dpa));
 	if (!res)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index d6aa59c..ba9aa84 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -515,7 +515,7 @@
 
 static unsigned long init_altmap_reserve(resource_size_t base)
 {
-	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long reserve = PFN_UP(SZ_8K);
 	unsigned long base_pfn = PHYS_PFN(base);
 
 	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f12753e..96ea6c7 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -709,6 +709,15 @@
 		return __nvmet_host_allowed(subsys, hostnqn);
 }
 
+static void nvmet_fatal_error_handler(struct work_struct *work)
+{
+	struct nvmet_ctrl *ctrl =
+			container_of(work, struct nvmet_ctrl, fatal_err_work);
+
+	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
+	ctrl->ops->delete_ctrl(ctrl);
+}
+
 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
 {
@@ -747,6 +756,7 @@
 
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
+	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 
 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -849,21 +859,11 @@
 	kref_put(&ctrl->ref, nvmet_ctrl_free);
 }
 
-static void nvmet_fatal_error_handler(struct work_struct *work)
-{
-	struct nvmet_ctrl *ctrl =
-			container_of(work, struct nvmet_ctrl, fatal_err_work);
-
-	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
-	ctrl->ops->delete_ctrl(ctrl);
-}
-
 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 {
 	mutex_lock(&ctrl->lock);
 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
 		ctrl->csts |= NVME_CSTS_CFS;
-		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 		schedule_work(&ctrl->fatal_err_work);
 	}
 	mutex_unlock(&ctrl->lock);
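The nvmet change hoists INIT_WORK() from the fatal-error trigger into
controller allocation: a work_struct must be initialized exactly once, and
re-running INIT_WORK() on an item that may already be queued corrupts the
workqueue state. The safe shape, with a hypothetical container type:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_ctrl {
	struct work_struct fatal_err_work;
};

static void example_fatal_err_handler(struct work_struct *work)
{
	struct example_ctrl *ctrl =
		container_of(work, struct example_ctrl, fatal_err_work);

	(void)ctrl;	/* the real handler deletes the controller here */
}

static struct example_ctrl *example_ctrl_alloc(void)
{
	struct example_ctrl *ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);

	if (ctrl)	/* initialize the work exactly once, at alloc time */
		INIT_WORK(&ctrl->fatal_err_work, example_fatal_err_handler);
	return ctrl;
}

static void example_fatal_error(struct example_ctrl *ctrl)
{
	schedule_work(&ctrl->fatal_err_work);	/* safe to call repeatedly */
}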
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index b482431..3a91e06 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -568,6 +568,9 @@
 		break;
 
 	case DISPLAY_MODEL_LASI:
+		/* Skip LED registration when running under QEMU */
+		if (running_on_qemu)
+			return 1;
 		LED_DATA_REG = data_reg;
 		led_func_ptr = led_LASI_driver;
 		printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG);
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index bdce067..02e6485 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1377,7 +1377,7 @@
 {
 	int i;
 	for (i = 0; i < NR_SUPERIOS; i++)
-		if (superios[i].io != p->base)
+		if (superios[i].io == p->base)
 			return &superios[i];
 	return NULL;
 }
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index b4d8ccf..200b415 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -1620,6 +1620,7 @@
 	spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
 
 	put_pcichild(hpdev, hv_pcidev_ref_childlist);
+	put_pcichild(hpdev, hv_pcidev_ref_initial);
 	put_pcichild(hpdev, hv_pcidev_ref_pnp);
 	put_hvpcibus(hpdev->hbus);
 }
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index cc68136..a1dab55 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -633,6 +633,7 @@
 	bool				use_pinctrl;
 	struct pinctrl			*pinctrl;
 	struct pinctrl_state		*pins_default;
+	struct pinctrl_state		*pins_power_on;
 	struct pinctrl_state		*pins_sleep;
 	struct msm_pcie_device_info   pcidev_table[MAX_DEVICE_NUM];
 };
@@ -6012,6 +6013,16 @@
 			msm_pcie_dev[rc_idx].pins_default = NULL;
 		}
 
+		msm_pcie_dev[rc_idx].pins_power_on =
+			pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
+					"slot_power_on");
+		if (IS_ERR(msm_pcie_dev[rc_idx].pins_power_on)) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d could not get pinctrl power_on state\n",
+				rc_idx);
+			msm_pcie_dev[rc_idx].pins_power_on = NULL;
+		}
+
 		msm_pcie_dev[rc_idx].pins_sleep =
 			pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
 						"sleep");
@@ -6039,6 +6050,11 @@
 	msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
 
 	msm_pcie_dev[rc_idx].drv_ready = true;
+	if (msm_pcie_dev[rc_idx].use_pinctrl &&
+		msm_pcie_dev[rc_idx].pins_power_on) {
+		pinctrl_select_state(msm_pcie_dev[rc_idx].pinctrl,
+			msm_pcie_dev[rc_idx].pins_power_on);
+	}
 
 	if (msm_pcie_dev[rc_idx].boot_option &
 			MSM_PCIE_NO_PROBE_ENUMERATION) {
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index c7f3408..54b3f9b 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -71,8 +71,8 @@
  * arch/x86/platform/intel-mid/pwr.c.
  */
 static const struct x86_cpu_id lpss_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_PENWELL),
-	ICPU(INTEL_FAM6_ATOM_MERRIFIELD),
+	ICPU(INTEL_FAM6_ATOM_SALTWELL_MID),
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID),
 	{}
 };
 
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e38bb48..832b8c8 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -172,6 +172,38 @@
 	link->clkpm_capable = (blacklist) ? 0 : capable;
 }
 
+static bool pcie_retrain_link(struct pcie_link_state *link)
+{
+	struct pci_dev *parent = link->pdev;
+	unsigned long start_jiffies;
+	u16 reg16;
+
+	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+	reg16 |= PCI_EXP_LNKCTL_RL;
+	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+	if (parent->clear_retrain_link) {
+		/*
+		 * Due to an erratum in some devices the Retrain Link bit
+		 * needs to be cleared again manually to allow the link
+		 * training to succeed.
+		 */
+		reg16 &= ~PCI_EXP_LNKCTL_RL;
+		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+	}
+
+	/* Wait for link training end. Break out after waiting for timeout */
+	start_jiffies = jiffies;
+	for (;;) {
+		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+		if (!(reg16 & PCI_EXP_LNKSTA_LT))
+			break;
+		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
+			break;
+		msleep(1);
+	}
+	return !(reg16 & PCI_EXP_LNKSTA_LT);
+}
+
 /*
  * pcie_aspm_configure_common_clock: check if the 2 ends of a link
  *   could use common clock. If they are, configure them to use the
@@ -181,7 +213,6 @@
 {
 	int same_clock = 1;
 	u16 reg16, parent_reg, child_reg[8];
-	unsigned long start_jiffies;
 	struct pci_dev *child, *parent = link->pdev;
 	struct pci_bus *linkbus = parent->subordinate;
 	/*
@@ -221,21 +252,7 @@
 		reg16 &= ~PCI_EXP_LNKCTL_CCC;
 	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
 
-	/* Retrain link */
-	reg16 |= PCI_EXP_LNKCTL_RL;
-	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
-
-	/* Wait for link training end. Break out after waiting for timeout */
-	start_jiffies = jiffies;
-	for (;;) {
-		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
-		if (!(reg16 & PCI_EXP_LNKSTA_LT))
-			break;
-		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
-			break;
-		msleep(1);
-	}
-	if (!(reg16 & PCI_EXP_LNKSTA_LT))
+	if (pcie_retrain_link(link))
 		return;
 
 	/* Training failed. Restore common clock configurations */
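pcie_retrain_link() factors the retrain trigger plus the jiffies-bounded poll
for PCI_EXP_LNKSTA_LT out of the common-clock path, which also gives the new
Pericom clear_retrain_link quirk below a single place to hook in. The
poll-loop idiom on its own, with stand-in register accessors:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define EXAMPLE_RETRAIN_TIMEOUT	(HZ / 10)	/* illustrative bound */

static bool example_wait_bit_clear(u16 (*read_status)(void), u16 busy_bit)
{
	unsigned long deadline = jiffies + EXAMPLE_RETRAIN_TIMEOUT;
	u16 status;

	for (;;) {
		status = read_status();
		if (!(status & busy_bit))
			return true;		/* training finished */
		if (time_after(jiffies, deadline))
			return false;		/* timed out */
		msleep(1);
	}
}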
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index dedb120..f474899 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2046,6 +2046,23 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
 
+/*
+ * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
+ * Link bit cleared after starting the link retrain process to allow this
+ * process to finish.
+ *
+ * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130.  See also the
+ * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
+ */
+static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
+{
+	dev->clear_retrain_link = 1;
+	pci_info(dev, "Enable PCIe Retrain Link quirk\n");
+}
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
+
 static void fixup_rev1_53c810(struct pci_dev *dev)
 {
 	u32 class = dev->class;
@@ -3326,6 +3343,7 @@
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
 
 static void quirk_no_pm_reset(struct pci_dev *dev)
 {
@@ -3866,6 +3884,8 @@
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
 			 quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
+			 quirk_dma_func1_alias);
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
 			 quirk_dma_func1_alias);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index cbe5f5c..e1b689f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -662,7 +662,7 @@
 
 static const char * const sdxc_a_groups[] = {
 	"sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
-	"sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
+	"sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
 };
 
 static const char * const pcm_a_groups[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 25152ea..95925a0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -839,10 +839,11 @@
 		if (req->firewall_rules_list[i].ip_type !=
 				QMI_IPA_IP_TYPE_V4_V01 &&
 			req->firewall_rules_list[i].ip_type !=
-				QMI_IPA_IP_TYPE_V6_V01)
+				QMI_IPA_IP_TYPE_V6_V01) {
 			IPAWANERR("Invalid IP type %d\n",
 					req->firewall_rules_list[i].ip_type);
-		return -EINVAL;
+			return -EINVAL;
+		}
 	}
 
 	req_desc.max_msg_len =
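
The hunk above fixes a dangling-statement bug: without braces, only the IPAWANERR() call was governed by the multi-line condition, so the return ran unconditionally on the first loop iteration and every rule list was rejected. A standalone reproduction of the bug class, with a hypothetical validate() in place of the QMI rule structures:

    #include <stdio.h>

    /* Buggy shape: the return looks guarded by the if, but only the
     * printf is; the loop exits with -1 on its first iteration. */
    static int validate_buggy(const int *v, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (v[i] != 4 && v[i] != 6)
                printf("Invalid IP type %d\n", v[i]);
            return -1;      /* unconditionally reached */
        }
        return 0;
    }

    /* Fixed shape, as in the hunk above: braces bind the return to
     * the condition. */
    static int validate_fixed(const int *v, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (v[i] != 4 && v[i] != 6) {
                printf("Invalid IP type %d\n", v[i]);
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        int ok[] = { 4, 6 };

        /* prints "buggy: -1, fixed: 0" for a perfectly valid list */
        printf("buggy: %d, fixed: %d\n",
               validate_buggy(ok, 2), validate_fixed(ok, 2));
        return 0;
    }
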
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
index 24b9a86..00942c3 100644
--- a/drivers/platform/msm/mhi_dev/mhi_uci.c
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -87,6 +87,8 @@
 	bool wr_cmpl;
 	/* Uevent broadcast of channel state */
 	bool state_bcast;
+	/* Skip node creation if not needed */
+	bool skip_node;
 	/* Number of write request structs to allocate */
 	u32 num_wr_reqs;
 
@@ -220,6 +222,29 @@
 		true
 	},
 	{
+		MHI_CLIENT_DCI_OUT,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_OUT,
+		NULL,
+		NULL,
+		NULL,
+		false,
+		true
+	},
+	{
+		MHI_CLIENT_DCI_IN,
+		TRB_MAX_DATA_SIZE,
+		MAX_NR_TRBS_PER_CHAN,
+		MHI_DIR_IN,
+		NULL,
+		NULL,
+		NULL,
+		false,
+		false,
+		true
+	},
+	{
 		MHI_CLIENT_DUN_OUT,
 		TRB_MAX_DATA_SIZE,
 		MAX_NR_TRBS_PER_CHAN,
@@ -237,6 +262,7 @@
 		NULL,
 		false,
 		false,
+		false,
 		50
 	},
 	{
@@ -1835,7 +1861,8 @@
 		 * this client's channels is called by the MHI driver,
 		 * if one is registered.
 		 */
-		if (mhi_client->in_chan_attr->chan_state_cb)
+		if (mhi_client->in_chan_attr->chan_state_cb ||
+				mhi_client->in_chan_attr->skip_node)
 			continue;
 		ret_val = uci_device_create(mhi_client);
 		if (ret_val)
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 46cab17..df998f6 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -1550,6 +1550,8 @@
 {
 	u32 m_cmd0 = 0;
 	u32 m_irq_status = 0;
+	u32 s_cmd0 = 0;
+	u32 s_irq_status = 0;
 	u32 geni_status = 0;
 	u32 geni_ios = 0;
 	u32 dma_rx_irq = 0;
@@ -1576,10 +1578,12 @@
 	}
 	m_cmd0 = geni_read_reg(base, SE_GENI_M_CMD0);
 	m_irq_status = geni_read_reg(base, SE_GENI_M_IRQ_STATUS);
+	s_cmd0 = geni_read_reg(base, SE_GENI_S_CMD0);
+	s_irq_status = geni_read_reg(base, SE_GENI_S_IRQ_STATUS);
 	geni_status = geni_read_reg(base, SE_GENI_STATUS);
 	geni_ios = geni_read_reg(base, SE_GENI_IOS);
-	dma_rx_irq = geni_read_reg(base, SE_DMA_TX_IRQ_STAT);
-	dma_tx_irq = geni_read_reg(base, SE_DMA_RX_IRQ_STAT);
+	dma_tx_irq = geni_read_reg(base, SE_DMA_TX_IRQ_STAT);
+	dma_rx_irq = geni_read_reg(base, SE_DMA_RX_IRQ_STAT);
 	rx_fifo_status = geni_read_reg(base, SE_GENI_RX_FIFO_STATUS);
 	tx_fifo_status = geni_read_reg(base, SE_GENI_TX_FIFO_STATUS);
 	se_dma_dbg = geni_read_reg(base, SE_DMA_DEBUG_REG0);
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 55663b3..58dcee5 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -68,6 +68,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/seq_file.h>
 #include <linux/string.h>
 #include <linux/tick.h>
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index c890a49..f655586 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4422,14 +4422,16 @@
 			}
 			return AE_OK;
 		}
-	default:
-		dprintk("Resource %d isn't an IRQ nor an IO port\n",
-			resource->type);
 
 	case ACPI_RESOURCE_TYPE_END_TAG:
 		return AE_OK;
+
+	default:
+		dprintk("Resource %d isn't an IRQ nor an IO port\n",
+			resource->type);
+		return AE_CTRL_TERMINATE;
+
 	}
-	return AE_CTRL_TERMINATE;
 }
 
 static int sony_pic_possible_resources(struct acpi_device *device)
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 75b8e0c..8a0a8fb 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -899,6 +899,10 @@
 	/* Register charger interrupts */
 	for (i = 0; i < CHRG_INTR_END; i++) {
 		pirq = platform_get_irq(info->pdev, i);
+		if (pirq < 0) {
+			dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
+			return pirq;
+		}
 		info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
 		if (info->irq[i] < 0) {
 			dev_warn(&info->pdev->dev,
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index e664ca7..13f23c0 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -1212,7 +1212,6 @@
 	if (ret < 0) {
 		pr_info("Cannot register extcon_dev for %s(cable: %s)\n",
 			cable->extcon_name, cable->name);
-		ret = -EINVAL;
 	}
 
 	return ret;
@@ -1634,7 +1633,7 @@
 
 	if (IS_ERR(desc)) {
 		dev_err(&pdev->dev, "No platform data (desc) found\n");
-		return -ENODEV;
+		return PTR_ERR(desc);
 	}
 
 	cm = devm_kzalloc(&pdev->dev,
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 7756b9f..a6efed1 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -427,15 +427,11 @@
 	char *prop_buf;
 	char *attrname;
 
-	dev_dbg(dev, "uevent\n");
-
 	if (!psy || !psy->desc) {
 		dev_dbg(dev, "No power supply yet\n");
 		return ret;
 	}
 
-	dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
-
 	ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
 	if (ret)
 		return ret;
@@ -471,8 +467,6 @@
 			goto out;
 		}
 
-		dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
-
 		ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
 		kfree(attrname);
 		if (ret)
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 7d79eac..7d9c7ba 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -1490,7 +1490,7 @@
 	 * If charge termination WA is active and has suspended charging, then
 	 * continue reporting charging status as FULL.
 	 */
-	if (is_client_vote_enabled(chg->usb_icl_votable,
+	if (is_client_vote_enabled_locked(chg->usb_icl_votable,
 						CHG_TERMINATION_VOTER)) {
 		val->intval = POWER_SUPPLY_STATUS_FULL;
 		return 0;
@@ -2854,7 +2854,6 @@
 		return IRQ_HANDLED;
 	}
 
-	rerun_election(chg->fcc_votable);
 	power_supply_changed(chg->batt_psy);
 	return IRQ_HANDLED;
 }
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 3c71f60..8809c1a 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1175,12 +1175,12 @@
 	RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE,	rapl_defaults_core),
 	RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP,	rapl_defaults_core),
 
-	RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1,	rapl_defaults_byt),
+	RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT,	rapl_defaults_byt),
 	RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT,	rapl_defaults_cht),
-	RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD,	rapl_defaults_tng),
-	RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD,	rapl_defaults_ann),
+	RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT_MID,rapl_defaults_tng),
+	RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT_MID,	rapl_defaults_ann),
 	RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT,	rapl_defaults_core),
-	RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON,	rapl_defaults_core),
+	RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_X,	rapl_defaults_core),
 
 	RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL,	rapl_defaults_hsw_server),
 	{}
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 7652477..39e8d60 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -131,7 +131,7 @@
  * ACT8865 voltage number
  */
 #define	ACT8865_VOLTAGE_NUM	64
-#define ACT8600_SUDCDC_VOLTAGE_NUM	255
+#define ACT8600_SUDCDC_VOLTAGE_NUM	256
 
 struct act8865 {
 	struct regmap *regmap;
@@ -222,7 +222,8 @@
 	REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0),
 	REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000),
 	REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000),
-	REGULATOR_LINEAR_RANGE(19000000, 191, 255, 400000),
+	REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000),
+	REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
 };
 
 static struct regulator_ops act8865_ops = {
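
REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) maps selector values to voltages as min_uV + (sel - min_sel) * step_uV, and ACT8600_SUDCDC_VOLTAGE_NUM counts selectors, so 0..255 is 256 values, not 255. The corrected table also splits off selectors 248..255 as a flat 41.4 V range instead of extrapolating 400 mV steps all the way to selector 255. A standalone check of that selector arithmetic, assuming the macro semantics just described:

    #include <stdio.h>

    struct linear_range {
        unsigned int min_uV;
        unsigned int min_sel;
        unsigned int max_sel;
        unsigned int step_uV;
    };

    /* Mirrors the fixed ACT8600 SUDCDC table from the hunk above. */
    static const struct linear_range ranges[] = {
        {  3000000,   0,  63,      0 },
        {  3000000,  64, 159, 100000 },
        { 12600000, 160, 191, 200000 },
        { 19000000, 192, 247, 400000 },
        { 41400000, 248, 255,      0 },
    };

    /* Selector -> microvolts, the same arithmetic the regulator core
     * applies to REGULATOR_LINEAR_RANGE entries. */
    static long sel_to_uV(unsigned int sel)
    {
        unsigned int i;

        for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
            const struct linear_range *r = &ranges[i];

            if (sel >= r->min_sel && sel <= r->max_sel)
                return r->min_uV +
                       (sel - r->min_sel) * (long)r->step_uV;
        }
        return -1;
    }

    int main(void)
    {
        /* 247 -> 41.0 V (last stepped value); 248..255 -> flat 41.4 V */
        printf("sel 247 = %ld uV\n", sel_to_uV(247));
        printf("sel 255 = %ld uV\n", sel_to_uV(255));
        return 0;
    }
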
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 92f8875..2daf751 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -303,13 +303,13 @@
 	regulator_desc_ldo(2, STEP_50_MV),
 	regulator_desc_ldo(3, STEP_50_MV),
 	regulator_desc_ldo(4, STEP_50_MV),
-	regulator_desc_ldo(5, STEP_50_MV),
+	regulator_desc_ldo(5, STEP_25_MV),
 	regulator_desc_ldo(6, STEP_25_MV),
 	regulator_desc_ldo(7, STEP_50_MV),
 	regulator_desc_ldo(8, STEP_50_MV),
 	regulator_desc_ldo(9, STEP_50_MV),
 	regulator_desc_ldo(10, STEP_50_MV),
-	regulator_desc_ldo(11, STEP_25_MV),
+	regulator_desc_ldo(11, STEP_50_MV),
 	regulator_desc_ldo(12, STEP_50_MV),
 	regulator_desc_ldo(13, STEP_50_MV),
 	regulator_desc_ldo(14, STEP_50_MV),
@@ -320,11 +320,11 @@
 	regulator_desc_ldo(19, STEP_50_MV),
 	regulator_desc_ldo(20, STEP_50_MV),
 	regulator_desc_ldo(21, STEP_50_MV),
-	regulator_desc_ldo(22, STEP_25_MV),
-	regulator_desc_ldo(23, STEP_25_MV),
+	regulator_desc_ldo(22, STEP_50_MV),
+	regulator_desc_ldo(23, STEP_50_MV),
 	regulator_desc_ldo(24, STEP_50_MV),
 	regulator_desc_ldo(25, STEP_50_MV),
-	regulator_desc_ldo(26, STEP_50_MV),
+	regulator_desc_ldo(26, STEP_25_MV),
 	regulator_desc_buck1_4(1),
 	regulator_desc_buck1_4(2),
 	regulator_desc_buck1_4(3),
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index d838e77..1fe1c18 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -376,7 +376,7 @@
 	regulator_desc_s2mps11_ldo(32, STEP_50_MV),
 	regulator_desc_s2mps11_ldo(33, STEP_50_MV),
 	regulator_desc_s2mps11_ldo(34, STEP_50_MV),
-	regulator_desc_s2mps11_ldo(35, STEP_50_MV),
+	regulator_desc_s2mps11_ldo(35, STEP_25_MV),
 	regulator_desc_s2mps11_ldo(36, STEP_50_MV),
 	regulator_desc_s2mps11_ldo(37, STEP_50_MV),
 	regulator_desc_s2mps11_ldo(38, STEP_50_MV),
@@ -386,8 +386,8 @@
 	regulator_desc_s2mps11_buck1_4(4),
 	regulator_desc_s2mps11_buck5,
 	regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
-	regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
-	regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
+	regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
+	regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
 	regulator_desc_s2mps11_buck9,
 	regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
 };
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index f85cae2..7e92e49 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -480,6 +480,13 @@
 	da9063_data_to_tm(data, &rtc->alarm_time, rtc);
 	rtc->rtc_sync = false;
 
+	/*
+	 * TODO: some models have alarms on a minute boundary but still support
+	 * real hardware interrupts. Add this once the core supports it.
+	 */
+	if (config->rtc_data_start != RTC_SEC)
+		rtc->rtc_dev->uie_unsupported = 1;
+
 	irq_alarm = platform_get_irq_byname(pdev, "ALARM");
 	ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
 					da9063_alarm_event,
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index e6bfb9c..5b136bd 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -52,13 +52,11 @@
  */
 void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
 {
-	unsigned int month, year;
-	unsigned long secs;
+	unsigned int month, year, secs;
 	int days;
 
 	/* time must be positive */
-	days = div_s64(time, 86400);
-	secs = time - (unsigned int) days * 86400;
+	days = div_s64_rem(time, 86400, &secs);
 
 	/* day of the week, 1970-01-01 was a Thursday */
 	tm->tm_wday = (days + 4) % 7;
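
div_s64_rem() produces the quotient and writes the remainder through a pointer in one call, which is why the rewrite above can drop the separate multiply-and-subtract and its cast. A userspace sketch with a local stand-in for the kernel helper (the real helper exists because 32-bit kernels lack native 64-bit division; its signature is simplified here to match the patch's use):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's div_s64_rem(); plain 64-bit
     * division is fine on the host. */
    static int64_t div_s64_rem(int64_t dividend, int32_t divisor,
                               unsigned int *rem)
    {
        *rem = (unsigned int)(dividend % divisor);
        return dividend / divisor;
    }

    int main(void)
    {
        int64_t time = 1700000123;      /* seconds since the epoch */
        unsigned int secs;
        int days = (int)div_s64_rem(time, 86400, &secs);

        /* One call yields both the day count and the seconds within
         * that day, as in rtc_time64_to_tm() above. */
        printf("days=%d secs=%u\n", days, secs);
        return 0;
    }
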
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 17b6235..600fb7f 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -454,7 +454,7 @@
 static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
 {
 	unsigned int byte;
-	int value = 0xff;	/* return 0xff for ignored values */
+	int value = -1;			/* return -1 for ignored values */
 
 	byte = readb(rtc->regbase + reg_off);
 	if (byte & AR_ENB) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index be17de9..9d77220 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2054,14 +2054,14 @@
 	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
 
 raw:
-	block->blocks = (private->real_cyl *
+	block->blocks = ((unsigned long) private->real_cyl *
 			  private->rdc_data.trk_per_cyl *
 			  blk_per_trk);
 
 	dev_info(&device->cdev->dev,
-		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
+		 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
 		 "%s\n", (block->bp_block >> 10),
-		 ((private->real_cyl *
+		 (((unsigned long) private->real_cyl *
 		   private->rdc_data.trk_per_cyl *
 		   blk_per_trk * (block->bp_block >> 9)) >> 1),
 		 ((blk_per_trk * block->bp_block) >> 10),
@@ -4508,6 +4508,14 @@
 		usrparm.psf_data &= 0x7fffffffULL;
 		usrparm.rssd_result &= 0x7fffffffULL;
 	}
+	/* at least 2 bytes are accessed and should be allocated */
+	if (usrparm.psf_data_len < 2) {
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			      "Symmetrix ioctl invalid data length %d",
+			      usrparm.psf_data_len);
+		rc = -EINVAL;
+		goto out;
+	}
 	/* alloc I/O data area */
 	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
 	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 285b400..5d5e78a 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -628,7 +628,7 @@
 		     (void (*)(unsigned long)) con3270_read_tasklet,
 		     (unsigned long) condev->read);
 
-	raw3270_add_view(&condev->view, &con3270_fn, 1);
+	raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
 
 	INIT_LIST_HEAD(&condev->freemem);
 	for (i = 0; i < CON3270_STRING_PAGES; i++) {
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 85eca1c..04a6810 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -462,7 +462,8 @@
 
 	init_waitqueue_head(&fp->wait);
 	fp->fs_pid = get_pid(task_pid(current));
-	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
+			      RAW3270_VIEW_LOCK_BH);
 	if (rc) {
 		fs3270_free_view(&fp->view);
 		goto out;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index a2da898..1ebf632 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -919,7 +919,7 @@
  * Add view to device with minor "minor".
  */
 int
-raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
 {
 	unsigned long flags;
 	struct raw3270 *rp;
@@ -941,6 +941,7 @@
 		view->cols = rp->cols;
 		view->ascebc = rp->ascebc;
 		spin_lock_init(&view->lock);
+		lockdep_set_subclass(&view->lock, subclass);
 		list_add(&view->list, &rp->view_list);
 		rc = 0;
 		spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 56519cb..7577d7d 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -149,6 +149,8 @@
 struct raw3270_view {
 	struct list_head list;
 	spinlock_t lock;
+#define RAW3270_VIEW_LOCK_IRQ	0
+#define RAW3270_VIEW_LOCK_BH	1
 	atomic_t ref_count;
 	struct raw3270 *dev;
 	struct raw3270_fn *fn;
@@ -157,7 +159,7 @@
 	unsigned char *ascebc;		/* ascii -> ebcdic table */
 };
 
-int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
 int raw3270_activate_view(struct raw3270_view *);
 void raw3270_del_view(struct raw3270_view *);
 void raw3270_deactivate_view(struct raw3270_view *);
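
Threading a subclass constant through raw3270_add_view() lets lockdep distinguish view locks taken from IRQ context (the console view) from those taken from BH context (tty and fullscreen views); without distinct subclasses the validator treats every view->lock as one lock class and can report false deadlocks. A minimal, hypothetical kernel-module sketch of the same idiom (not part of this patch, compiles only against kernel headers, and lockdep_set_subclass() is a no-op without CONFIG_LOCKDEP):

    #include <linux/lockdep.h>
    #include <linux/module.h>
    #include <linux/spinlock.h>

    /* Two structurally identical locks taken from different contexts;
     * distinct lockdep subclasses keep the validator from conflating
     * them, the same trick raw3270_add_view() now applies per view. */
    #define VIEW_LOCK_IRQ 0
    #define VIEW_LOCK_BH  1

    struct my_view {
        spinlock_t lock;
    };

    static struct my_view console_view, tty_view;

    static int __init subclass_demo_init(void)
    {
        spin_lock_init(&console_view.lock);
        lockdep_set_subclass(&console_view.lock, VIEW_LOCK_IRQ);

        spin_lock_init(&tty_view.lock);
        lockdep_set_subclass(&tty_view.lock, VIEW_LOCK_BH);
        return 0;
    }

    static void __exit subclass_demo_exit(void) { }

    module_init(subclass_demo_init);
    module_exit(subclass_demo_exit);
    MODULE_LICENSE("GPL");
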
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 272cb6c..6dd6f9f 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -978,7 +978,8 @@
 		return PTR_ERR(tp);
 
 	rc = raw3270_add_view(&tp->view, &tty3270_fn,
-			      tty->index + RAW3270_FIRSTMINOR);
+			      tty->index + RAW3270_FIRSTMINOR,
+			      RAW3270_VIEW_LOCK_BH);
 	if (rc) {
 		tty3270_free_view(tp);
 		return rc;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index ad17fc5..e22b9ac 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1595,6 +1595,7 @@
 		if (priv->channel[direction] == NULL) {
 			if (direction == CTCM_WRITE)
 				channel_free(priv->channel[CTCM_READ]);
+			result = -ENODEV;
 			goto out_dev;
 		}
 		priv->channel[direction]->netdev = dev;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 2abcd33..abe460e 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -652,6 +652,20 @@
 	add_timer(&erp_action->timer);
 }
 
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+				     int clear, char *dbftag)
+{
+	unsigned long flags;
+	struct zfcp_port *port;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	read_lock(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
+		_zfcp_erp_port_forced_reopen(port, clear, dbftag);
+	read_unlock(&adapter->port_list_lock);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
 				      int clear, char *id)
 {
@@ -1306,6 +1320,9 @@
 		struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
 		int lun_status;
 
+		if (sdev->sdev_state == SDEV_DEL ||
+		    sdev->sdev_state == SDEV_CANCEL)
+			continue;
 		if (zsdev->port != port)
 			continue;
 		/* LUN under port of interest */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b326f05..a39a745 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -68,6 +68,8 @@
 extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+					    int clear, char *dbftag);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 237688a..f7630cf 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -238,10 +238,6 @@
 	list_for_each_entry(port, &adapter->port_list, list) {
 		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
 			zfcp_fc_test_link(port);
-		if (!port->d_id)
-			zfcp_erp_port_reopen(port,
-					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "fcrscn1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -249,6 +245,7 @@
 static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 {
 	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fc_els_rscn *head;
 	struct fc_els_rscn_page *page;
 	u16 i;
@@ -261,6 +258,22 @@
 	/* see FC-FS */
 	no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
 
+	if (no_entries > 1) {
+		/* handle failed ports */
+		unsigned long flags;
+		struct zfcp_port *port;
+
+		read_lock_irqsave(&adapter->port_list_lock, flags);
+		list_for_each_entry(port, &adapter->port_list, list) {
+			if (port->d_id)
+				continue;
+			zfcp_erp_port_reopen(port,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "fcrscn1");
+		}
+		read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	}
+
 	for (i = 1; i < no_entries; i++) {
 		/* skip head and start with 1st element */
 		page++;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 3afb200..bdb257e 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -326,6 +326,10 @@
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	int ret = SUCCESS, fc_ret;
 
+	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+		zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+		zfcp_erp_wait(adapter);
+	}
 	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
 	zfcp_erp_wait(adapter);
 	fc_ret = fc_block_scsi_eh(scpnt);
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 3493d44..d5e510f 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -283,6 +283,8 @@
 {
 	struct virtio_ccw_vq_info *info;
 
+	if (!vcdev->airq_info)
+		return;
 	list_for_each_entry(info, &vcdev->virtqueues, node)
 		drop_airq_indicator(info->vq, vcdev->airq_info);
 }
@@ -424,7 +426,7 @@
 	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
 	if (ret)
 		return ret;
-	return vcdev->config_block->num;
+	return vcdev->config_block->num ?: -ENOENT;
 }
 
 static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
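
The `?:` form in the virtio_ccw hunk above is the GNU "Elvis" operator: `a ?: b` means `a ? a : b` with `a` evaluated only once, so the function now returns the configured queue size when it is nonzero and -ENOENT when the config block reports zero. A standalone demonstration (requires GCC or Clang, since this is a GNU C extension):

    #include <errno.h>
    #include <stdio.h>

    /* Return the queue size, or -ENOENT when none is configured. */
    static int queue_num(int configured)
    {
        return configured ?: -ENOENT;
    }

    int main(void)
    {
        printf("%d %d\n", queue_num(64), queue_num(0)); /* 64 -2 */
        return 0;
    }
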
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 89a52b9..894d97e 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1713,8 +1713,11 @@
 	}
 
 out:
-	if (req->nsge > 0)
+	if (req->nsge > 0) {
 		scsi_dma_unmap(cmnd);
+		if (req->dcopy && (host_status == DID_OK))
+			host_status = csio_scsi_copy_to_sgl(hw, req);
+	}
 
 	cmnd->result = (((host_status) << 16) | scsi_status);
 	cmnd->scsi_done(cmnd);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 2f872f7..5252dd5 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -10,6 +10,7 @@
  */
 
 #include "hisi_sas.h"
+#include "../libsas/sas_internal.h"
 #define DRV_NAME "hisi_sas"
 
 #define DEV_IS_GONE(dev) \
@@ -1128,9 +1129,18 @@
 
 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
 {
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	struct sas_phy *sphy = sas_phy->phy;
+	struct sas_phy_data *d = sphy->hostdata;
+
 	phy->phy_attached = 0;
 	phy->phy_type = 0;
 	phy->port = NULL;
+
+	if (d->enable)
+		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+	else
+		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
 }
 
 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index e3ffd24..97aeadd 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1935,7 +1935,6 @@
 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
 			     fc_rport_state(rdata));
 
-		rdata->flags &= ~FC_RP_STARTED;
 		fc_rport_enter_delete(rdata, RPORT_EV_STOP);
 		mutex_unlock(&rdata->rp_mutex);
 		kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c79743d..2ffe104 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1448,7 +1448,13 @@
 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
 		return -ENODATA;
 
+	spin_lock_bh(&conn->session->back_lock);
+	if (conn->task == NULL) {
+		spin_unlock_bh(&conn->session->back_lock);
+		return -ENODATA;
+	}
 	__iscsi_get_task(task);
+	spin_unlock_bh(&conn->session->back_lock);
 	spin_unlock_bh(&conn->session->frwd_lock);
 	rc = conn->session->tt->xmit_task(task);
 	spin_lock_bh(&conn->session->frwd_lock);
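
The libiscsi fix takes session->back_lock and re-checks conn->task before __iscsi_get_task(), closing the window in which the task could be freed between reading the pointer and taking the reference. The general shape (re-validate the pointer under the lock that guards its lifetime, then take the reference while still holding that lock) can be sketched standalone with pthreads, using hypothetical names:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Miniature of the pattern: 'current_task' may be cleared by
     * another thread; the lock that guards it must be held across
     * both the NULL re-check and the refcount increment. */
    struct task {
        atomic_int refcnt;
    };

    static pthread_mutex_t back_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct task *current_task;

    static struct task *get_current_task(void)
    {
        struct task *t;

        pthread_mutex_lock(&back_lock);
        t = current_task;
        if (t)
            atomic_fetch_add(&t->refcnt, 1); /* safe: can't be freed yet */
        pthread_mutex_unlock(&back_lock);
        return t;   /* NULL means "no task", like -ENODATA above */
    }

    static void put_task(struct task *t)
    {
        if (atomic_fetch_sub(&t->refcnt, 1) == 1)
            free(t);
    }

    int main(void)
    {
        current_task = calloc(1, sizeof(*current_task));
        atomic_store(&current_task->refcnt, 1);

        struct task *t = get_current_task();
        printf("got task: %s\n", t ? "yes" : "no");
        if (t)
            put_task(t);
        put_task(current_task);
        return 0;
    }
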
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 7be581f..1a6f65d 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -47,17 +47,16 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
 		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+		complete(&task->slow_task->completion);
+	}
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	complete(&task->slow_task->completion);
 }
 
 static void smp_task_done(struct sas_task *task)
 {
-	if (!del_timer(&task->slow_task->timer))
-		return;
+	del_timer(&task->slow_task->timer);
 	complete(&task->slow_task->completion);
 }
 
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 5de024a..5b1c37e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3956,6 +3956,7 @@
 	if (megasas_create_frame_pool(instance)) {
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
 		megasas_free_cmds(instance);
+		return -ENOMEM;
 	}
 
 	return 0;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9a34afc..5c3dfd9 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -345,7 +345,7 @@
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size;
+		ha->optrom_region_size = size;
 
 		ha->optrom_state = QLA_SREADING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -418,7 +418,7 @@
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size;
+		ha->optrom_region_size = size;
 
 		ha->optrom_state = QLA_SWRITING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index f9f899e..c158967 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3207,6 +3207,8 @@
 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
 		return -EINVAL;
 	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
 	conn = cls_conn->dd_data;
 	qla_conn = conn->dd_data;
 	qla_conn->qla_ep = ep->dd_data;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 282ea00..9d555b6 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -249,6 +249,7 @@
 	{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+	{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
 	{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
 	{"SONY", "TSL", NULL, BLIST_FORCELUN},		/* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 375cede..c9bc6f0 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -75,6 +75,7 @@
 	{"NETAPP", "INF-01-00",		"rdac", },
 	{"LSI", "INF-01-00",		"rdac", },
 	{"ENGENIO", "INF-01-00",	"rdac", },
+	{"LENOVO", "DE_Series",		"rdac", },
 	{NULL, NULL,			NULL },
 };
 
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 1d57b34..5de4741 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -219,7 +219,7 @@
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 
 	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
-		       GFP_ATOMIC);
+		       GFP_KERNEL);
 	if (!sdev)
 		goto out;
 
@@ -796,7 +796,7 @@
 	 */
 	sdev->inquiry = kmemdup(inq_result,
 				max_t(size_t, sdev->inquiry_len, 36),
-				GFP_ATOMIC);
+				GFP_KERNEL);
 	if (sdev->inquiry == NULL)
 		return SCSI_SCAN_NO_RESPONSE;
 
@@ -1094,7 +1094,7 @@
 	if (!sdev)
 		goto out;
 
-	result = kmalloc(result_len, GFP_ATOMIC |
+	result = kmalloc(result_len, GFP_KERNEL |
 			((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
 	if (!result)
 		goto out_free_sdev;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
old mode 100644
new mode 100755
index f2acfbc..4fba09b
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1290,11 +1290,6 @@
 			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
 	}
 
-	/*
-	 * XXX and what if there are packets in flight and this close()
-	 * XXX is followed by a "rmmod sd_mod"?
-	 */
-
 	scsi_disk_put(sdkp);
 }
 
@@ -2737,6 +2732,58 @@
 		sdkp->ws10 = 1;
 }
 
+/*
+ * Determine the device's preferred I/O size for reads and writes
+ * unless the reported value is unreasonably small, large, not a
+ * multiple of the physical block size, or simply garbage.
+ */
+static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
+				      unsigned int dev_max)
+{
+	struct scsi_device *sdp = sdkp->device;
+	unsigned int opt_xfer_bytes =
+		sdkp->opt_xfer_blocks * sdp->sector_size;
+
+	if (sdkp->opt_xfer_blocks == 0)
+		return false;
+
+	if (sdkp->opt_xfer_blocks > dev_max) {
+		sd_first_printk(KERN_WARNING, sdkp,
+				"Optimal transfer size %u logical blocks " \
+				"> dev_max (%u logical blocks)\n",
+				sdkp->opt_xfer_blocks, dev_max);
+		return false;
+	}
+
+	if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
+		sd_first_printk(KERN_WARNING, sdkp,
+				"Optimal transfer size %u logical blocks " \
+				"> sd driver limit (%u logical blocks)\n",
+				sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
+		return false;
+	}
+
+	if (opt_xfer_bytes < PAGE_SIZE) {
+		sd_first_printk(KERN_WARNING, sdkp,
+				"Optimal transfer size %u bytes < " \
+				"PAGE_SIZE (%u bytes)\n",
+				opt_xfer_bytes, (unsigned int)PAGE_SIZE);
+		return false;
+	}
+
+	if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
+		sd_first_printk(KERN_WARNING, sdkp,
+				"Optimal transfer size %u bytes not a " \
+				"multiple of physical block size (%u bytes)\n",
+				opt_xfer_bytes, sdkp->physical_block_size);
+		return false;
+	}
+
+	sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
+			opt_xfer_bytes);
+	return true;
+}
+
 /**
  *	sd_revalidate_disk - called the first time a new disk is seen,
  *	performs disk spin up, read_capacity, etc.
@@ -2801,18 +2848,10 @@
 	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
 	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
 
-	/*
-	 * Determine the device's preferred I/O size for reads and writes
-	 * unless the reported value is unreasonably small, large, or
-	 * garbage.
-	 */
-	if (sdkp->opt_xfer_blocks &&
-	    sdkp->opt_xfer_blocks <= dev_max &&
-	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
+	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
 		rw_max = q->limits.io_opt =
-			sdkp->opt_xfer_blocks * sdp->sector_size;
-	else
+						sdkp->opt_xfer_blocks * sdp->sector_size;
+	} else
 		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
 				      (sector_t)BLK_DEF_MAX_SECTORS);
 
@@ -3120,11 +3159,23 @@
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct gendisk *disk = sdkp->disk;
-	
+	struct request_queue *q = disk->queue;
+
 	spin_lock(&sd_index_lock);
 	ida_remove(&sd_index_ida, sdkp->index);
 	spin_unlock(&sd_index_lock);
 
+	/*
+	 * Wait until all requests that are in progress have completed.
+	 * This is necessary to prevent e.g. scsi_end_request() from
+	 * crashing after disk->private_data has been cleared. Wait from
+	 * inside scsi_disk_release() instead of from sd_release() so that
+	 * freezing and unfreezing the request queue does not affect user
+	 * space I/O when multiple processes open a /dev/sd... node
+	 * concurrently.
+	 */
+	blk_mq_freeze_queue(q);
+	blk_mq_unfreeze_queue(q);
+
 	disk->private_data = NULL;
 	put_disk(disk);
 	put_device(&sdkp->device->sdev_gendev);
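
sd_validate_opt_xfer_size() above rejects an optimal transfer size that is not a multiple of the physical block size with the bitmask test `opt_xfer_bytes & (physical_block_size - 1)`, which is valid because block sizes are powers of two. A standalone check of that arithmetic:

    #include <stdbool.h>
    #include <stdio.h>

    /* n must be a power of two (block sizes are); then x is a multiple
     * of n exactly when the low log2(n) bits of x are clear. */
    static bool is_multiple_pow2(unsigned int x, unsigned int n)
    {
        return (x & (n - 1)) == 0;
    }

    int main(void)
    {
        /* 64 KiB opt_xfer against a 4 KiB physical block: accepted.
         * 6 KiB against 4 KiB: rejected, as in the validation above. */
        printf("%d %d\n", is_multiple_pow2(65536, 4096),
                          is_multiple_pow2(6144, 4096));
        return 0;
    }
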
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index d92b280..6df34d6 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -641,13 +641,22 @@
 static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
 	struct storvsc_device *stor_device;
-	int num_cpus = num_online_cpus();
 	int num_sc;
 	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
 	int ret, t;
 
-	num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+	/*
+	 * If the number of CPUs is artificially restricted, such as
+	 * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+	 * sub-channels >= the number of CPUs. These sub-channels
+	 * should not be created. The primary channel is already created
+	 * and assigned to one CPU, so check against # CPUs - 1.
+	 */
+	num_sc = min((int)(num_online_cpus() - 1), max_chns);
+	if (!num_sc)
+		return;
+
 	stor_device = get_out_stor_device(device);
 	if (!stor_device)
 		return;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
old mode 100644
new mode 100755
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index cbc8e93..7ba0031 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -693,7 +693,6 @@
 		return FAILED;
 
 	memset(cmd, 0, sizeof(*cmd));
-	cmd->sc = sc;
 	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
 		.type = VIRTIO_SCSI_T_TMF,
 		.subtype = cpu_to_virtio32(vscsi->vdev,
@@ -752,7 +751,6 @@
 		return FAILED;
 
 	memset(cmd, 0, sizeof(*cmd));
-	cmd->sc = sc;
 	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
 		.type = VIRTIO_SCSI_T_TMF,
 		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 9583336..cbf1734 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -240,6 +240,16 @@
 	  These figures are reported in MPM sleep clock cycles and have a
 	  resolution of 31 bits, as 1 bit is used as an overflow check.
 
+config MSM_BOOT_TIME_MARKER
+        bool "Use MSM boot time marker reporting"
+        depends on MSM_BOOT_STATS
+        help
+         Use this to mark MSM boot KPIs for measurement; it provides
+         instrumentation for boot time measurement. To create an entry
+         from the kernel, call the "place_marker" function. From
+         userspace, write the marker name to
+         "/sys/kernel/debug/bootkpi/kpi_values".
+         If unsure, say N.
+
 config MSM_CORE_HANG_DETECT
        tristate "MSM Core Hang Detection Support"
        help
@@ -554,6 +564,16 @@
 	  registers the transport with IPC Router and enable message
 	  exchange.
 
+config MSM_IPC_ROUTER_MHI_DEV_XPRT
+	depends on MSM_MHI_DEV
+	depends on IPC_ROUTER
+	bool "MSM MHI_DEV XPRT Layer"
+	help
+	  MHI_DEV Transport Layer that enables off-chip communication of
+	  IPC Router. When the MHI endpoint becomes available, this layer
+	  registers the transport with IPC Router and enables message
+	  exchange.
+
 config MSM_IPC_ROUTER_GLINK_XPRT
 	depends on MSM_GLINK
 	depends on IPC_ROUTER
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 2e59e77..1dda5d1 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -26,6 +26,7 @@
 obj-$(CONFIG_QCOM_EARLY_RANDOM)	+= early_random.o
 obj-$(CONFIG_SOC_BUS) += socinfo.o
 obj-$(CONFIG_MSM_BOOT_STATS) += boot_stats.o
+obj-$(CONFIG_MSM_BOOT_TIME_MARKER) += boot_marker.o
 obj-$(CONFIG_MSM_CORE_HANG_DETECT) += core_hang_detect.o
 obj-$(CONFIG_MSM_GLADIATOR_HANG_DETECT) += gladiator_hang_detect.o
 obj-$(CONFIG_MSM_GLADIATOR_ERP) += gladiator_erp.o
@@ -59,6 +60,7 @@
 obj-$(CONFIG_MSM_IPC_ROUTER_SMD_XPRT) += ipc_router_smd_xprt.o
 obj-$(CONFIG_MSM_IPC_ROUTER_USB_XPRT) += ipc_router_usb_xprt.o
 obj-$(CONFIG_MSM_IPC_ROUTER_MHI_XPRT) += ipc_router_mhi_xprt.o
+obj-$(CONFIG_MSM_IPC_ROUTER_MHI_DEV_XPRT) += ipc_router_mhi_dev_xprt.o
 obj-$(CONFIG_MSM_IPC_ROUTER_GLINK_XPRT) += ipc_router_glink_xprt.o
 obj-$(CONFIG_MSM_QMI_INTERFACE) += qmi_interface.o
 obj-$(CONFIG_MSM_GLINK_PKT) += msm_glink_pkt.o
diff --git a/drivers/soc/qcom/bg_rsb.c b/drivers/soc/qcom/bg_rsb.c
index dd8e45c..3e338f2 100644
--- a/drivers/soc/qcom/bg_rsb.c
+++ b/drivers/soc/qcom/bg_rsb.c
@@ -49,6 +49,8 @@
 #define BGRSB_POWER_DISABLE 0
 #define BGRSB_GLINK_POWER_ENABLE 6
 #define BGRSB_GLINK_POWER_DISABLE 7
+#define BGRSB_IN_TWM 8
+#define BGRSB_OUT_TWM 9
 
 
 struct bgrsb_regulator {
@@ -137,10 +139,12 @@
 	bool is_calibrd;
 
 	bool is_cnfgrd;
+	bool blk_rsb_cmnds;
 };
 
 static void *bgrsb_drv;
 static int bgrsb_enable(struct bgrsb_priv *dev, bool enable);
+static bool is_in_twm;
 
 int bgrsb_send_input(struct event *evnt)
 {
@@ -418,6 +422,7 @@
 	}
 
 	dev->is_cnfgrd = false;
+	dev->blk_rsb_cmnds = false;
 	pr_info("RSB current state is : %d\n", dev->bgrsb_current_state);
 
 	if (dev->bgrsb_current_state == BGRSB_STATE_INIT) {
@@ -457,6 +462,8 @@
 	}
 
 	dev->is_cnfgrd = false;
+	if (is_in_twm)
+		dev->blk_rsb_cmnds = true;
 
 	if (dev->handle)
 		glink_close(dev->handle);
@@ -832,9 +839,13 @@
 		dev->bttn_configs = (uint8_t)val;
 		queue_work(dev->bgrsb_wq, &dev->bttn_configr_work);
 		break;
+	case BGRSB_IN_TWM:
+		is_in_twm = true;
 	case BGRSB_GLINK_POWER_DISABLE:
 		queue_work(dev->bgrsb_wq, &dev->rsb_glink_down_work);
 		break;
+	case BGRSB_OUT_TWM:
+		is_in_twm = false;
 	case BGRSB_GLINK_POWER_ENABLE:
 		queue_work(dev->bgrsb_wq, &dev->rsb_glink_up_work);
 		break;
@@ -851,6 +862,12 @@
 	if (!arr)
 		return -ENOMEM;
 
+	if (dev->blk_rsb_cmnds) {
+		pr_err("Device is in TWM state\n");
+		kfree(arr);
+		return count;
+	}
+
 	if (!dev->is_cnfgrd) {
 		kfree(arr);
 		return -ENOMEDIUM;
diff --git a/drivers/soc/qcom/boot_marker.c b/drivers/soc/qcom/boot_marker.c
new file mode 100644
index 0000000..b7a442c
--- /dev/null
+++ b/drivers/soc/qcom/boot_marker.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2016, 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <soc/qcom/boot_stats.h>
+
+#define MAX_STRING_LEN 256
+#define BOOT_MARKER_MAX_LEN 40
+
+struct boot_marker {
+	char marker_name[BOOT_MARKER_MAX_LEN];
+	unsigned long long int timer_value;
+	struct list_head list;
+	struct mutex lock;
+};
+
+static struct dentry *dent_bkpi, *dent_bkpi_status, *dent_mpm_timer;
+static struct boot_marker boot_marker_list;
+
+static void _create_boot_marker(const char *name,
+		unsigned long long int timer_value)
+{
+	struct boot_marker *new_boot_marker;
+
+	pr_debug("%-41s:%llu.%03llu seconds\n", name,
+			timer_value/TIMER_KHZ,
+			((timer_value % TIMER_KHZ)
+			 * 1000) / TIMER_KHZ);
+
+	new_boot_marker = kmalloc(sizeof(*new_boot_marker), GFP_KERNEL);
+	if (!new_boot_marker)
+		return;
+
+	strlcpy(new_boot_marker->marker_name, name,
+			sizeof(new_boot_marker->marker_name));
+	new_boot_marker->timer_value = timer_value;
+
+	mutex_lock(&boot_marker_list.lock);
+	list_add_tail(&(new_boot_marker->list), &(boot_marker_list.list));
+	mutex_unlock(&boot_marker_list.lock);
+}
+
+static void set_bootloader_stats(void)
+{
+	_create_boot_marker("M - APPSBL Start - ",
+		readl_relaxed(&boot_stats->bootloader_start));
+	_create_boot_marker("M - APPSBL Display Init - ",
+		readl_relaxed(&boot_stats->bootloader_display));
+	_create_boot_marker("M - APPSBL Early-Domain Start - ",
+		readl_relaxed(&boot_stats->bootloader_early_domain_start));
+	_create_boot_marker("D - APPSBL Kernel Load Time - ",
+		readl_relaxed(&boot_stats->bootloader_load_kernel));
+	_create_boot_marker("D - APPSBL Kernel Auth Time - ",
+		readl_relaxed(&boot_stats->bootloader_checksum));
+	_create_boot_marker("M - APPSBL End - ",
+		readl_relaxed(&boot_stats->bootloader_end));
+}
+
+void place_marker(const char *name)
+{
+	_create_boot_marker((char *) name, msm_timer_get_sclk_ticks());
+}
+EXPORT_SYMBOL(place_marker);
+
+static ssize_t bootkpi_reader(struct file *fp, char __user *user_buffer,
+		size_t count, loff_t *position)
+{
+	int rc = 0;
+	char *buf;
+	int temp = 0;
+	struct boot_marker *marker;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&boot_marker_list.lock);
+	list_for_each_entry(marker, &boot_marker_list.list, list) {
+		temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+				"%-41s:%llu.%03llu seconds\n",
+				marker->marker_name,
+				marker->timer_value/TIMER_KHZ,
+				(((marker->timer_value % TIMER_KHZ)
+				  * 1000) / TIMER_KHZ));
+	}
+	mutex_unlock(&boot_marker_list.lock);
+	rc = simple_read_from_buffer(user_buffer, count, position, buf, temp);
+	kfree(buf);
+	return rc;
+}
+
+static ssize_t bootkpi_writer(struct file *fp, const char __user *user_buffer,
+		size_t count, loff_t *position)
+{
+	int rc = 0;
+	char buf[MAX_STRING_LEN];
+
+	if (count > MAX_STRING_LEN)
+		return -EINVAL;
+	rc = simple_write_to_buffer(buf,
+			sizeof(buf) - 1, position, user_buffer, count);
+	if (rc < 0)
+		return rc;
+	buf[rc] = '\0';
+	place_marker(buf);
+	return rc;
+}
+
+static int bootkpi_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations fops_bkpi = {
+	.owner = THIS_MODULE,
+	.open  = bootkpi_open,
+	.read  = bootkpi_reader,
+	.write = bootkpi_writer,
+};
+
+static ssize_t mpm_timer_read(struct file *fp, char __user *user_buffer,
+		size_t count, loff_t *position)
+{
+	unsigned long long int timer_value;
+	int rc = 0;
+	char buf[100];
+	int temp = 0;
+
+	timer_value = msm_timer_get_sclk_ticks();
+
+	temp = scnprintf(buf, sizeof(buf), "%llu.%03llu seconds\n",
+			timer_value/TIMER_KHZ,
+			(((timer_value % TIMER_KHZ) * 1000) / TIMER_KHZ));
+
+	rc = simple_read_from_buffer(user_buffer, count, position, buf, temp);
+
+	return rc;
+}
+
+static int mpm_timer_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static int mpm_timer_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	phys_addr_t addr = msm_timer_get_pa();
+
+	if (vma->vm_flags & VM_WRITE)
+		return -EPERM;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	return vm_iomap_memory(vma, addr, PAGE_SIZE);
+}
+
+static const struct file_operations fops_mpm_timer = {
+	.owner = THIS_MODULE,
+	.open  = mpm_timer_open,
+	.read  = mpm_timer_read,
+	.mmap = mpm_timer_mmap,
+};
+
+static int __init init_bootkpi(void)
+{
+	dent_bkpi = debugfs_create_dir("bootkpi", NULL);
+	if (IS_ERR_OR_NULL(dent_bkpi))
+		return -ENODEV;
+
+	dent_bkpi_status = debugfs_create_file_unsafe("kpi_values",
+			0666, dent_bkpi, NULL, &fops_bkpi);
+	if (IS_ERR_OR_NULL(dent_bkpi_status)) {
+		debugfs_remove(dent_bkpi);
+		dent_bkpi = NULL;
+		pr_err("boot_marker: Could not create 'kpi_values' debugfs file\n");
+		return -ENODEV;
+	}
+
+	dent_mpm_timer = debugfs_create_file("mpm_timer",
+			0444, dent_bkpi, NULL, &fops_mpm_timer);
+	if (IS_ERR_OR_NULL(dent_mpm_timer)) {
+		debugfs_remove(dent_bkpi_status);
+		dent_bkpi_status = NULL;
+		debugfs_remove(dent_bkpi);
+		dent_bkpi = NULL;
+		pr_err("boot_marker: Could not create 'mpm_timer' debugfs file\n");
+		return -ENODEV;
+	}
+
+	INIT_LIST_HEAD(&boot_marker_list.list);
+	mutex_init(&boot_marker_list.lock);
+	set_bootloader_stats();
+	return 0;
+}
+subsys_initcall(init_bootkpi);
+
+static void __exit exit_bootkpi(void)
+{
+	struct boot_marker *marker;
+	struct boot_marker *temp_addr;
+
+	debugfs_remove_recursive(dent_bkpi);
+	mutex_lock(&boot_marker_list.lock);
+	list_for_each_entry_safe(marker, temp_addr, &boot_marker_list.list,
+			list) {
+		list_del(&marker->list);
+		kfree(marker);
+	}
+	mutex_unlock(&boot_marker_list.lock);
+	boot_stats_exit();
+}
+module_exit(exit_bootkpi);
+
+MODULE_DESCRIPTION("MSM boot key performance indicators");
+MODULE_LICENSE("GPL v2");
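
The markers above store raw sleep-clock ticks and format them as seconds with millisecond precision using integer math only: whole seconds are ticks / TIMER_KHZ, and the milliseconds come from scaling the leftover ticks by 1000. A standalone check of that conversion, assuming TIMER_KHZ is 32768 (the usual 32.768 kHz sleep-clock rate; the real constant comes from soc/qcom/boot_stats.h):

    #include <stdio.h>

    #define TIMER_KHZ 32768ULL  /* assumed sleep-clock ticks per second */

    int main(void)
    {
        unsigned long long ticks = 10 * TIMER_KHZ + TIMER_KHZ / 2; /* 10.5 s */

        /* Same integer math as _create_boot_marker()'s format string:
         * whole seconds, then milliseconds from the leftover ticks.
         * Prints "10.500 seconds". */
        printf("%llu.%03llu seconds\n",
               ticks / TIMER_KHZ,
               ((ticks % TIMER_KHZ) * 1000) / TIMER_KHZ);
        return 0;
    }
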
diff --git a/drivers/soc/qcom/boot_stats.c b/drivers/soc/qcom/boot_stats.c
index 2fc9cbf..d9f36aa 100644
--- a/drivers/soc/qcom/boot_stats.c
+++ b/drivers/soc/qcom/boot_stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014,2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,17 +22,12 @@
 #include <linux/sched.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-
-struct boot_stats {
-	uint32_t bootloader_start;
-	uint32_t bootloader_end;
-	uint32_t bootloader_display;
-	uint32_t bootloader_load_kernel;
-};
+#include <soc/qcom/boot_stats.h>
 
 static void __iomem *mpm_counter_base;
+static phys_addr_t mpm_counter_pa;
 static uint32_t mpm_counter_freq;
-static struct boot_stats __iomem *boot_stats;
+struct boot_stats __iomem *boot_stats;
 
 static int mpm_parse_dt(void)
 {
@@ -75,17 +70,58 @@
 static void print_boot_stats(void)
 {
 	pr_info("KPI: Bootloader start count = %u\n",
-		readl_relaxed(&boot_stats->bootloader_start));
+			readl_relaxed(&boot_stats->bootloader_start));
 	pr_info("KPI: Bootloader end count = %u\n",
-		readl_relaxed(&boot_stats->bootloader_end));
+			readl_relaxed(&boot_stats->bootloader_end));
 	pr_info("KPI: Bootloader display count = %u\n",
-		readl_relaxed(&boot_stats->bootloader_display));
+			readl_relaxed(&boot_stats->bootloader_display));
 	pr_info("KPI: Bootloader load kernel count = %u\n",
-		readl_relaxed(&boot_stats->bootloader_load_kernel));
+			readl_relaxed(&boot_stats->bootloader_load_kernel));
 	pr_info("KPI: Kernel MPM timestamp = %u\n",
-		readl_relaxed(mpm_counter_base));
+			readl_relaxed(mpm_counter_base));
 	pr_info("KPI: Kernel MPM Clock frequency = %u\n",
-		mpm_counter_freq);
+			mpm_counter_freq);
+}
+
+unsigned long long int msm_timer_get_sclk_ticks(void)
+{
+	unsigned long long int t1, t2;
+	int loop_count = 10;
+	int loop_zero_count = 3;
+	u64 tmp = USEC_PER_SEC;
+	void __iomem *sclk_tick;
+
+	do_div(tmp, TIMER_KHZ);
+	tmp /= (loop_zero_count-1);
+	sclk_tick = mpm_counter_base;
+	if (!sclk_tick)
+		return -EINVAL;
+	while (loop_zero_count--) {
+		t1 = __raw_readl_no_log(sclk_tick);
+		do {
+			udelay(1);
+			t2 = t1;
+			t1 = __raw_readl_no_log(sclk_tick);
+		} while ((t2 != t1) && --loop_count);
+		if (!loop_count) {
+			pr_err("boot_stats: SCLK  did not stabilize\n");
+			return 0;
+		}
+		if (t1)
+			break;
+
+		udelay(tmp);
+	}
+	if (!loop_zero_count) {
+		pr_err("boot_stats: SCLK reads zero\n");
+		return 0;
+	}
+	return t1;
+}
+
+phys_addr_t msm_timer_get_pa(void)
+{
+	return mpm_counter_pa;
 }
 
 int boot_stats_init(void)
@@ -98,9 +134,15 @@
 
 	print_boot_stats();
 
-	iounmap(boot_stats);
-	iounmap(mpm_counter_base);
+	if (!boot_marker_enabled())
+		boot_stats_exit();
 
 	return 0;
 }
 
+int boot_stats_exit(void)
+{
+	iounmap(boot_stats);
+	iounmap(mpm_counter_base);
+	return 0;
+}
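
msm_timer_get_sclk_ticks() added above samples the MPM counter until two consecutive reads agree, which guards against catching the free-running counter mid-transition, and bounds the number of attempts so a stuck counter cannot hang the caller. The consecutive-read idiom, sketched standalone against a simulated jittery counter (all names here are hypothetical):

    #include <stdio.h>

    /* Simulated free-running counter that "settles" after a few
     * reads, standing in for __raw_readl_no_log(mpm_counter_base). */
    static unsigned long fake_reads[] = { 100, 103, 107, 107, 107 };
    static unsigned int idx;

    static unsigned long read_counter(void)
    {
        return fake_reads[idx < 4 ? idx++ : 4];
    }

    /* Read until two consecutive samples match, with a bounded number
     * of attempts: the same loop shape as msm_timer_get_sclk_ticks(). */
    static unsigned long read_stable(void)
    {
        int loops = 10;
        unsigned long t1 = read_counter(), t2;

        do {
            t2 = t1;
            t1 = read_counter();
        } while (t2 != t1 && --loops);

        return loops ? t1 : 0;  /* 0: counter never stabilized */
    }

    int main(void)
    {
        printf("stable tick value: %lu\n", read_stable());
        return 0;
    }
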
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index aa5d09e..ac36b27 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -49,6 +49,7 @@
 #include <soc/qcom/service-notifier.h>
 #include <soc/qcom/socinfo.h>
 #include <soc/qcom/ramdump.h>
+#include <linux/thermal.h>
 
 #include "wlan_firmware_service_v01.h"
 
@@ -481,6 +482,9 @@
 	uint32_t fw_early_crash_irq;
 	struct completion unblock_shutdown;
 	bool is_ssr;
+	struct thermal_cooling_device *tcdev;
+	unsigned long curr_thermal_state;
+	unsigned long max_thermal_state;
 } *penv;
 
 #ifdef CONFIG_ICNSS_DEBUG
@@ -2430,6 +2434,115 @@
 	return ret;
 }
 
+static int icnss_tcdev_get_max_state(struct thermal_cooling_device *tcdev,
+					unsigned long *thermal_state)
+{
+	struct icnss_priv *priv = tcdev->devdata;
+
+	*thermal_state = priv->max_thermal_state;
+
+	return 0;
+}
+
+
+static int icnss_tcdev_get_cur_state(struct thermal_cooling_device *tcdev,
+					unsigned long *thermal_state)
+{
+	struct icnss_priv *priv = tcdev->devdata;
+
+	*thermal_state = priv->curr_thermal_state;
+
+	return 0;
+}
+
+
+static int icnss_tcdev_set_cur_state(struct thermal_cooling_device *tcdev,
+					unsigned long thermal_state)
+{
+	struct icnss_priv *priv = tcdev->devdata;
+	struct device *dev = &priv->pdev->dev;
+	int ret = 0;
+
+	priv->curr_thermal_state = thermal_state;
+
+	if (!priv->ops || !priv->ops->set_therm_state)
+		return 0;
+
+	icnss_pr_vdbg("Cooling device set current state: %ld",
+							thermal_state);
+
+	ret = priv->ops->set_therm_state(dev, thermal_state);
+
+	if (ret)
+		icnss_pr_err("Setting Current Thermal State Failed: %d\n", ret);
+
+	return 0;
+}
+
+static struct thermal_cooling_device_ops icnss_cooling_ops = {
+	.get_max_state = icnss_tcdev_get_max_state,
+	.get_cur_state = icnss_tcdev_get_cur_state,
+	.set_cur_state = icnss_tcdev_set_cur_state,
+};
+
+int icnss_thermal_register(struct device *dev, unsigned long max_state)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+	int ret = 0;
+
+	priv->max_thermal_state = max_state;
+
+	if (of_find_property(dev->of_node, "#cooling-cells", NULL)) {
+		priv->tcdev = thermal_of_cooling_device_register(dev->of_node,
+						"icnss", priv,
+						&icnss_cooling_ops);
+		if (IS_ERR_OR_NULL(priv->tcdev)) {
+			ret = PTR_ERR(priv->tcdev);
+			icnss_pr_err("Cooling device register failed: %d\n",
+								ret);
+		} else {
+			icnss_pr_vdbg("Cooling device registered");
+		}
+	} else {
+		icnss_pr_dbg("Cooling device registration not supported");
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(icnss_thermal_register);
+
+void icnss_thermal_unregister(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	if (!IS_ERR_OR_NULL(priv->tcdev))
+		thermal_cooling_device_unregister(priv->tcdev);
+
+	priv->tcdev = NULL;
+}
+EXPORT_SYMBOL(icnss_thermal_unregister);
+
+int icnss_get_curr_therm_state(struct device *dev,
+					unsigned long *thermal_state)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(priv->tcdev)) {
+		ret = PTR_ERR(priv->tcdev);
+		icnss_pr_err("Get current thermal state failed: %d\n", ret);
+		return ret;
+	}
+
+	icnss_pr_vdbg("Cooling device current state: %ld",
+					priv->curr_thermal_state);
+
+	*thermal_state = priv->curr_thermal_state;
+	return ret;
+}
+EXPORT_SYMBOL(icnss_get_curr_therm_state);
+
 static int icnss_driver_event_register_driver(void *data)
 {
 	int ret = 0;
diff --git a/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c b/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c
new file mode 100644
index 0000000..d0b650d
--- /dev/null
+++ b/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c
@@ -0,0 +1,733 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipc_router_xprt.h>
+#include <linux/module.h>
+#include <linux/msm_mhi_dev.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+static int ipc_router_mhi_dev_xprt_debug_mask;
+module_param_named(debug_mask, ipc_router_mhi_dev_xprt_debug_mask, int, 0664);
+
+#define D(x...) do { \
+	if (ipc_router_mhi_dev_xprt_debug_mask) \
+		pr_info(x); \
+} while (0)
+
+#define MODULE_NAME "ipc_router_mhi_dev_xprt"
+#define XPRT_NAME_LEN 32
+#define IPC_ROUTER_MHI_XPRT_MAX_PKT_SIZE 8192
+#define MHI_IPCR_ASYNC_TIMEOUT msecs_to_jiffies(100)
+#define MAX_IPCR_WR_REQ 128
+
+/**
+ * ipc_router_mhi_dev_channel - MHI Channel related information
+ * @out_chan_id: Out channel ID for use by IPC ROUTER enumerated in MHI driver.
+ * @out_handle: MHI Output channel handle.
+ * @in_chan_id: In channel ID for use by IPC ROUTER enumerated in MHI driver.
+ * @in_handle: MHI Input channel handle.
+ * @state_lock: Lock to protect access to the state information.
+ * @out_chan_enabled: State of the outgoing channel.
+ * @in_chan_enabled: State of the incoming channel.
+ * @max_packet_size: Maximum possible packet size.
+ * @mhi_xprtp: Pointer to IPC Router MHI XPRT.
+ */
+struct ipc_router_mhi_dev_channel {
+	enum mhi_client_channel out_chan_id;
+	struct mhi_dev_client *out_handle;
+	enum mhi_client_channel in_chan_id;
+	struct mhi_dev_client *in_handle;
+	struct mutex state_lock;
+	bool out_chan_enabled;
+	bool in_chan_enabled;
+	size_t max_packet_size;
+	void *mhi_xprtp;
+};
+
+/**
+ * ipc_router_mhi_dev_xprt - IPC Router's MHI XPRT structure
+ * @list: IPC router's MHI XPRTs list.
+ * @ch_hndl: Data Structure to hold MHI Channel information.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @xprt: IPC Router XPRT structure to contain MHI XPRT specific info.
+ * @wq: Workqueue to queue read & other XPRT related works.
+ * @read_work: Read Work to perform read operation from MHI Driver.
+ * @write_wait_q: Wait Queue to handle the write events.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ * @wr_req_lock: Lock for write request list
+ * @wreqs: List of write requests
+ * @wr_req_list: Head of the list of available write requests
+ * @read_done: Completion for async read.
+ */
+struct ipc_router_mhi_dev_xprt {
+	struct list_head list;
+	struct ipc_router_mhi_dev_channel ch_hndl;
+	char xprt_name[XPRT_NAME_LEN];
+	struct msm_ipc_router_xprt xprt;
+	struct workqueue_struct *wq;
+	struct work_struct read_work;
+	wait_queue_head_t write_wait_q;
+	unsigned int xprt_version;
+	unsigned int xprt_option;
+	spinlock_t wr_req_lock;
+	struct mhi_req *wreqs;
+	struct list_head wr_req_list;
+	struct completion read_done;
+};
+
+/**
+ * ipc_router_mhi_dev_xprt_config - Config. Info. of each MHI XPRT
+ * @out_chan_id: Out channel ID for use by IPC ROUTER enumerated in MHI driver.
+ * @in_chan_id: In channel ID for use by IPC ROUTER enumerated in MHI driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @link_id: Network Cluster ID to which this XPRT belongs.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ */
+struct ipc_router_mhi_dev_xprt_config {
+	enum mhi_client_channel out_chan_id;
+	enum mhi_client_channel in_chan_id;
+	char xprt_name[XPRT_NAME_LEN];
+	uint32_t link_id;
+	uint32_t xprt_version;
+	uint32_t xprt_option;
+};
+
+static struct ipc_router_mhi_dev_xprt *mhi_xprtp;
+
+/*
+ * ipc_router_mhi_dev_release_pkt() - Release a cloned IPC Router packet
+ * @ref: Reference to the kref object in the IPC Router packet.
+ */
+static void ipc_router_mhi_dev_release_pkt(struct kref *ref)
+{
+	struct rr_packet *pkt = container_of(ref, struct rr_packet, ref);
+
+	release_pkt(pkt);
+}
+
+/**
+ * ipc_router_mhi_dev_set_xprt_version() - Set the IPC Router version
+ * @xprt: Reference to the transport structure.
+ * @version: The version to be set in transport.
+ */
+static void ipc_router_mhi_dev_set_xprt_version(
+	struct msm_ipc_router_xprt *xprt,
+	unsigned int version)
+{
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp;
+
+	if (!xprt)
+		return;
+	mhi_xprtp = container_of(xprt, struct ipc_router_mhi_dev_xprt, xprt);
+	mhi_xprtp->xprt_version = version;
+}
+
+/**
+ * ipc_router_mhi_dev_get_xprt_version() - Get IPC Router header version
+ *				       supported by the XPRT
+ * @xprt: XPRT for which the version information is required.
+ *
+ * @return: IPC Router header version supported by the XPRT.
+ */
+static int ipc_router_mhi_dev_get_xprt_version(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	mhi_xprtp = container_of(xprt, struct ipc_router_mhi_dev_xprt, xprt);
+
+	return (int)mhi_xprtp->xprt_version;
+}
+
+/**
+ * ipc_router_mhi_dev_get_xprt_option() - Get XPRT options
+ * @xprt: XPRT for which the option information is required.
+ *
+ * @return: Options supported by the XPRT.
+ */
+static int ipc_router_mhi_dev_get_xprt_option(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	mhi_xprtp = container_of(xprt, struct ipc_router_mhi_dev_xprt, xprt);
+
+	return (int)mhi_xprtp->xprt_option;
+}
+
+static void mhi_xprt_write_completion_cb(void *req)
+{
+	struct mhi_req *ureq = req;
+	struct rr_packet *pkt = ureq->context;
+	unsigned long flags;
+
+	if (pkt)
+		kref_put(&pkt->ref, ipc_router_mhi_dev_release_pkt);
+
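+	/* Return the completed write request to the free pool */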
+	spin_lock_irqsave(&mhi_xprtp->wr_req_lock, flags);
+	list_add_tail(&ureq->list, &mhi_xprtp->wr_req_list);
+	spin_unlock_irqrestore(&mhi_xprtp->wr_req_lock, flags);
+}
+
+static void mhi_xprt_read_completion_cb(void *req)
+{
+	struct mhi_req *ureq = req;
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp;
+
+	mhi_xprtp = (struct ipc_router_mhi_dev_xprt *)ureq->context;
+	complete(&mhi_xprtp->read_done);
+}
+
+static void mhi_xprt_event_notifier(struct mhi_dev_client_cb_reason *reason)
+{
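+	/* A new TRE on the IN channel unblocks pending writers, while one
+	 * on the OUT channel signals data ready to be read.
+	 */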
+	if (reason->reason == MHI_DEV_TRE_AVAILABLE) {
+		if (reason->ch_id == mhi_xprtp->ch_hndl.in_chan_id)
+			wake_up(&mhi_xprtp->write_wait_q);
+		else if (reason->ch_id == mhi_xprtp->ch_hndl.out_chan_id)
+			queue_work(mhi_xprtp->wq, &mhi_xprtp->read_work);
+	}
+}
+
+/**
+ * mhi_xprt_read_data() - Read data from the XPRT
+ * @work: Read work used to perform the read operation from the MHI driver.
+ */
+static void mhi_xprt_read_data(struct work_struct *work)
+{
+	struct sk_buff *skb;
+	struct rr_packet *in_pkt;
+	struct mhi_req mreq = {0};
+	int data_sz;
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp =
+		container_of(work, struct ipc_router_mhi_dev_xprt, read_work);
+	long compl_ret;
+
+	while (!mhi_dev_channel_isempty(mhi_xprtp->ch_hndl.out_handle)) {
+		in_pkt = create_pkt(NULL);
+		if (!in_pkt) {
+			IPC_RTR_ERR("%s: Couldn't alloc rr_packet\n",
+				    __func__);
+			return;
+		}
+		D("%s: Allocated rr_packet\n", __func__);
+
+		skb = alloc_skb(mhi_xprtp->ch_hndl.max_packet_size, GFP_KERNEL);
+		if (!skb) {
+			IPC_RTR_ERR("%s: Could not allocate SKB\n", __func__);
+			goto exit_free_pkt;
+		}
+
+		mreq.client = mhi_xprtp->ch_hndl.out_handle;
+		mreq.context = mhi_xprtp;
+		mreq.buf = skb->data;
+		mreq.len = mhi_xprtp->ch_hndl.max_packet_size;
+		mreq.chan = mhi_xprtp->ch_hndl.out_chan_id;
+		mreq.mode = IPA_DMA_ASYNC;
+		mreq.client_cb = mhi_xprt_read_completion_cb;
+
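+		/* Arm the read completion before queueing the request */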
+		reinit_completion(&mhi_xprtp->read_done);
+
+		data_sz = mhi_dev_read_channel(&mreq);
+		if (data_sz < 0) {
+			IPC_RTR_ERR("%s: Failed to queue TRB into MHI\n",
+				    __func__);
+			goto exit_free_skb;
+		} else if (!data_sz) {
+			D("%s: No data available\n", __func__);
+			goto exit_free_skb;
+		}
+
+		compl_ret =
+			wait_for_completion_interruptible_timeout(
+				&mhi_xprtp->read_done,
+				MHI_IPCR_ASYNC_TIMEOUT);
+
+		if (compl_ret == -ERESTARTSYS) {
+			IPC_RTR_ERR("%s: Exit signal caught\n", __func__);
+			goto exit_free_skb;
+		} else if (compl_ret == 0) {
+			IPC_RTR_ERR("%s: Read timed out\n", __func__);
+			goto exit_free_skb;
+		}
+
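+		/* Drop reads that do not hold a complete IPC Router packet */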
+		if (mreq.transfer_len != ipc_router_peek_pkt_size(skb->data)) {
+			IPC_RTR_ERR("%s: Incomplete packet, drop it\n",
+				    __func__);
+			goto exit_free_skb;
+		}
+
+		skb_put(skb, mreq.transfer_len);
+		in_pkt->length = mreq.transfer_len;
+		skb_queue_tail(in_pkt->pkt_fragment_q, skb);
+
+		D("%s: Packet size read %d\n", __func__, in_pkt->length);
+		msm_ipc_router_xprt_notify(&mhi_xprtp->xprt,
+					   IPC_ROUTER_XPRT_EVENT_DATA,
+					   (void *)in_pkt);
+		release_pkt(in_pkt);
+	}
+
+	return;
+
+exit_free_skb:
+	kfree_skb(skb);
+exit_free_pkt:
+	release_pkt(in_pkt);
+}
+
+/**
+ * ipc_router_mhi_dev_write_skb() - Write a single SKB onto the XPRT
+ * @mhi_xprtp: XPRT in which the SKB has to be written.
+ * @skb: SKB to be written.
+ * @pkt: IPC packet, for reference count purposes
+ *
+ * @return: return number of bytes written on success,
+ *          standard Linux error codes on failure.
+ */
+static int ipc_router_mhi_dev_write_skb(
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp,
+	struct sk_buff *skb, struct rr_packet *pkt)
+{
+	struct mhi_req *wreq;
+
+	if (skb->len > mhi_xprtp->ch_hndl.max_packet_size) {
+		IPC_RTR_ERR("%s: Packet size is above the limit\n", __func__);
+		return -EINVAL;
+	}
+
+	wait_event_interruptible(mhi_xprtp->write_wait_q,
+		!mhi_dev_channel_isempty(mhi_xprtp->ch_hndl.in_handle) ||
+					 !mhi_xprtp->ch_hndl.in_chan_enabled);
+	mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+	if (!mhi_xprtp->ch_hndl.in_chan_enabled) {
+		mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+		IPC_RTR_ERR("%s: %s chnl reset\n",
+			    __func__, mhi_xprtp->xprt_name);
+		return -ENETRESET;
+	}
+
+	spin_lock_irq(&mhi_xprtp->wr_req_lock);
+	if (list_empty(&mhi_xprtp->wr_req_list)) {
+		IPC_RTR_ERR("%s: Write request pool empty\n", __func__);
+		spin_unlock_irq(&mhi_xprtp->wr_req_lock);
+		mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+		return -ENOMEM;
+	}
+	wreq = container_of(mhi_xprtp->wr_req_list.next,
+			    struct mhi_req, list);
+	list_del_init(&wreq->list);
+
+	kref_get(&pkt->ref);
+
+	wreq->client = mhi_xprtp->ch_hndl.in_handle;
+	wreq->context = pkt;
+	wreq->buf = skb->data;
+	wreq->len = skb->len;
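+	/*
+	 * list_del_init() re-initialized the node above, so this check is
+	 * always true and a send completion is requested for every request.
+	 */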
+	if (list_empty(&wreq->list))
+		wreq->snd_cmpl = 1;
+	else
+		wreq->snd_cmpl = 0;
+	spin_unlock_irq(&mhi_xprtp->wr_req_lock);
+
+	if (mhi_dev_write_channel(wreq) < 0) {
+		spin_lock_irq(&mhi_xprtp->wr_req_lock);
+		list_add_tail(&wreq->list, &mhi_xprtp->wr_req_list);
+		spin_unlock_irq(&mhi_xprtp->wr_req_lock);
+		kref_put(&pkt->ref, ipc_router_mhi_dev_release_pkt);
+		mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+		IPC_RTR_ERR("%s: Error queueing mhi_xfer\n", __func__);
+		return -EFAULT;
+	}
+
+	mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+
+	return skb->len;
+}
+
+/**
+ * ipc_router_mhi_dev_write() - Write to XPRT
+ * @data: Data to be written to the XPRT.
+ * @len: Length of the data to be written.
+ * @xprt: XPRT to which the data has to be written.
+ *
+ * @return: Data Length on success, standard Linux error codes on failure.
+ */
+static int ipc_router_mhi_dev_write(void *data,
+	uint32_t len, struct msm_ipc_router_xprt *xprt)
+{
+	struct rr_packet *pkt = (struct rr_packet *)data;
+	struct sk_buff *ipc_rtr_pkt;
+	struct rr_packet *cloned_pkt;
+	int rc = 0;
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp =
+		container_of(xprt, struct ipc_router_mhi_dev_xprt, xprt);
+
+	if (!pkt)
+		return -EINVAL;
+
+	if (!len || pkt->length != len)
+		return -EINVAL;
+
+	cloned_pkt = clone_pkt(pkt);
+	if (!cloned_pkt) {
+		pr_err("%s: Error in cloning packet while tx\n", __func__);
+		return -ENOMEM;
+	}
+	D("%s: Ready to write %d bytes\n", __func__, len);
+	skb_queue_walk(cloned_pkt->pkt_fragment_q, ipc_rtr_pkt) {
+		rc = ipc_router_mhi_dev_write_skb(mhi_xprtp, ipc_rtr_pkt,
+						  cloned_pkt);
+		if (rc < 0) {
+			IPC_RTR_ERR("%s: Error writing SKB %d\n",
+				    __func__, rc);
+			break;
+		}
+	}
+
+	kref_put(&cloned_pkt->ref, ipc_router_mhi_dev_release_pkt);
+	if (rc < 0)
+		return rc;
+	else
+		return len;
+}
+
+/**
+ * ipc_router_mhi_dev_close() - Close the XPRT
+ * @xprt: XPRT which needs to be closed.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int ipc_router_mhi_dev_close(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	mhi_xprtp = container_of(xprt, struct ipc_router_mhi_dev_xprt, xprt);
+
+	mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+	mhi_xprtp->ch_hndl.out_chan_enabled = false;
+	mhi_xprtp->ch_hndl.in_chan_enabled = false;
+	mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+	flush_workqueue(mhi_xprtp->wq);
+
+	return 0;
+}
+
+static int mhi_dev_xprt_alloc_write_reqs(
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp)
+{
+	int i;
+
+	mhi_xprtp->wreqs = kcalloc(MAX_IPCR_WR_REQ,
+				   sizeof(struct mhi_req),
+				   GFP_KERNEL);
+	if (!mhi_xprtp->wreqs) {
+		IPC_RTR_ERR("%s: Write reqs alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
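+	/* Build a free pool of reusable asynchronous write requests */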
+	INIT_LIST_HEAD(&mhi_xprtp->wr_req_list);
+	for (i = 0; i < MAX_IPCR_WR_REQ; ++i) {
+		mhi_xprtp->wreqs[i].chan = mhi_xprtp->ch_hndl.in_chan_id;
+		mhi_xprtp->wreqs[i].mode = IPA_DMA_ASYNC;
+		mhi_xprtp->wreqs[i].client_cb = mhi_xprt_write_completion_cb;
+		list_add_tail(&mhi_xprtp->wreqs[i].list,
+			      &mhi_xprtp->wr_req_list);
+	}
+
+	D("%s: write reqs allocation successful\n", __func__);
+	return 0;
+}
+
+/**
+ * ipc_router_mhi_dev_driver_register() - register for MHI channels
+ *
+ * @mhi_xprtp: pointer to IPC router mhi xprt structure.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when a new XPRT is added and the MHI
+ * channel is ready.
+ */
+static int ipc_router_mhi_dev_driver_register(
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp)
+{
+	int rc;
+
+	rc = mhi_dev_open_channel(mhi_xprtp->ch_hndl.out_chan_id,
+		&mhi_xprtp->ch_hndl.out_handle, mhi_xprt_event_notifier);
+	if (rc) {
+		IPC_RTR_ERR("%s Failed to open chan 0x%x, rc %d\n",
+			__func__, mhi_xprtp->ch_hndl.out_chan_id, rc);
+		goto exit;
+	}
+	mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+	mhi_xprtp->ch_hndl.out_chan_enabled = true;
+	mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+
+	rc = mhi_dev_open_channel(mhi_xprtp->ch_hndl.in_chan_id,
+		&mhi_xprtp->ch_hndl.in_handle, mhi_xprt_event_notifier);
+	if (rc) {
+		IPC_RTR_ERR("%s Failed to open chan 0x%x, rc %d\n",
+			__func__, mhi_xprtp->ch_hndl.in_chan_id, rc);
+		goto close_out_chan;
+	}
+	mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+	mhi_xprtp->ch_hndl.in_chan_enabled = true;
+	mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+
+	rc = mhi_dev_xprt_alloc_write_reqs(mhi_xprtp);
+	if (rc)
+		goto close_in_chan;
+
+	/* Register the XPRT before receiving any data */
+	msm_ipc_router_xprt_notify(&mhi_xprtp->xprt,
+			   IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+	D("%s: Notified IPC Router of %s OPEN\n",
+	  __func__, mhi_xprtp->xprt.name);
+
+	return 0;
+
+close_in_chan:
+	mhi_dev_close_channel(mhi_xprtp->ch_hndl.in_handle);
+close_out_chan:
+	mhi_dev_close_channel(mhi_xprtp->ch_hndl.out_handle);
+exit:
+	return rc;
+}
+
+/**
+ * mhi_dev_xprt_state_cb() - Enable the MHI link for communication
+ * @cb_data: MHI state callback data carrying this XPRT as user data.
+ */
+static void mhi_dev_xprt_state_cb(struct mhi_dev_client_cb_data *cb_data)
+{
+	int rc;
+	struct ipc_router_mhi_dev_xprt *mhi_xprtp =
+		(struct ipc_router_mhi_dev_xprt *)cb_data->user_data;
+
+	/* Channel already enabled */
+	if (mhi_xprtp->ch_hndl.in_chan_enabled)
+		return;
+
+	rc = ipc_router_mhi_dev_driver_register(mhi_xprtp);
+	if (rc)
+		IPC_RTR_ERR("%s: Failed to regiter for MHI channels\n",
+			    __func__);
+}
+
+/**
+ * ipc_router_mhi_dev_config_init() - init MHI xprt configs
+ *
+ * @mhi_xprt_config: pointer to MHI xprt configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the MHI XPRT pointer with
+ * the MHI XPRT configurations from device tree.
+ */
+static int ipc_router_mhi_dev_config_init(
+	struct ipc_router_mhi_dev_xprt_config *mhi_xprt_config,
+	struct device *dev)
+{
+	char wq_name[XPRT_NAME_LEN];
+	int rc = 0;
+
+	mhi_xprtp = devm_kzalloc(dev, sizeof(struct ipc_router_mhi_dev_xprt),
+				 GFP_KERNEL);
+	/* devm_kzalloc() returns NULL on failure, never an ERR_PTR */
+	if (!mhi_xprtp) {
+		IPC_RTR_ERR("%s: devm_kzalloc() failed for mhi_xprtp:%s\n",
+			__func__, mhi_xprt_config->xprt_name);
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	scnprintf(wq_name, XPRT_NAME_LEN, "MHI_DEV_XPRT%x:%x",
+		  mhi_xprt_config->out_chan_id, mhi_xprt_config->in_chan_id);
+	mhi_xprtp->wq = create_singlethread_workqueue(wq_name);
+	if (!mhi_xprtp->wq) {
+		IPC_RTR_ERR("%s: %s create WQ failed\n",
+			__func__, mhi_xprt_config->xprt_name);
+		rc = -ENOMEM;
+		goto free_mhi_xprtp;
+	}
+
+	INIT_WORK(&mhi_xprtp->read_work, mhi_xprt_read_data);
+	init_waitqueue_head(&mhi_xprtp->write_wait_q);
+	mhi_xprtp->xprt_version = mhi_xprt_config->xprt_version;
+	mhi_xprtp->xprt_option = mhi_xprt_config->xprt_option;
+	strlcpy(mhi_xprtp->xprt_name, mhi_xprt_config->xprt_name,
+		XPRT_NAME_LEN);
+
+	/* Initialize XPRT operations and parameters registered with IPC RTR */
+	mhi_xprtp->xprt.link_id = mhi_xprt_config->link_id;
+	mhi_xprtp->xprt.name = mhi_xprtp->xprt_name;
+	mhi_xprtp->xprt.get_version = ipc_router_mhi_dev_get_xprt_version;
+	mhi_xprtp->xprt.set_version = ipc_router_mhi_dev_set_xprt_version;
+	mhi_xprtp->xprt.get_option = ipc_router_mhi_dev_get_xprt_option;
+	mhi_xprtp->xprt.read_avail = NULL;
+	mhi_xprtp->xprt.read = NULL;
+	mhi_xprtp->xprt.write_avail = NULL;
+	mhi_xprtp->xprt.write = ipc_router_mhi_dev_write;
+	mhi_xprtp->xprt.close = ipc_router_mhi_dev_close;
+	mhi_xprtp->xprt.sft_close_done = NULL;
+	mhi_xprtp->xprt.priv = NULL;
+
+	/* Initialize channel handle parameters */
+	mhi_xprtp->ch_hndl.out_chan_id = mhi_xprt_config->out_chan_id;
+	mhi_xprtp->ch_hndl.in_chan_id = mhi_xprt_config->in_chan_id;
+	mutex_init(&mhi_xprtp->ch_hndl.state_lock);
+	mhi_xprtp->ch_hndl.max_packet_size = IPC_ROUTER_MHI_XPRT_MAX_PKT_SIZE;
+	mhi_xprtp->ch_hndl.mhi_xprtp = mhi_xprtp;
+	init_completion(&mhi_xprtp->read_done);
+	spin_lock_init(&mhi_xprtp->wr_req_lock);
+
+	/* Register callback to mhi_dev */
+	rc = mhi_register_state_cb(mhi_dev_xprt_state_cb, mhi_xprtp,
+		mhi_xprtp->ch_hndl.in_chan_id);
+	if (rc && rc != -EEXIST) {
+		IPC_RTR_ERR("%s: Failed to register MHI state cb\n", __func__);
+		goto destroy_wq;
+	} else if (rc == -EEXIST) {
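+		/* Channels are already up (-EEXIST); register right away */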
+		rc = ipc_router_mhi_dev_driver_register(mhi_xprtp);
+		if (rc) {
+			IPC_RTR_ERR("%s: Failed to regiter for MHI channels\n",
+				    __func__);
+			goto destroy_wq;
+		}
+	}
+
+	return 0;
+
+destroy_wq:
+	destroy_workqueue(mhi_xprtp->wq);
+free_mhi_xprtp:
+	devm_kfree(dev, mhi_xprtp);
+exit:
+	return rc;
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @mhi_xprt_config: pointer to MHI XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+		struct ipc_router_mhi_dev_xprt_config *mhi_xprt_config)
+{
+	int rc;
+	uint32_t out_chan_id;
+	uint32_t in_chan_id;
+	const char *remote_ss;
+	uint32_t link_id;
+	uint32_t version;
+	char *key;
+
+	key = "qcom,out-chan-id";
+	rc = of_property_read_u32(node, key, &out_chan_id);
+	if (rc)
+		goto error;
+	mhi_xprt_config->out_chan_id = out_chan_id;
+
+	key = "qcom,in-chan-id";
+	rc = of_property_read_u32(node, key, &in_chan_id);
+	if (rc)
+		goto error;
+	mhi_xprt_config->in_chan_id = in_chan_id;
+
+	key = "qcom,xprt-remote";
+	remote_ss = of_get_property(node, key, NULL);
+	if (!remote_ss)
+		goto error;
+
+	key = "qcom,xprt-linkid";
+	rc = of_property_read_u32(node, key, &link_id);
+	if (rc)
+		goto error;
+	mhi_xprt_config->link_id = link_id;
+
+	key = "qcom,xprt-version";
+	rc = of_property_read_u32(node, key, &version);
+	if (rc)
+		goto error;
+	mhi_xprt_config->xprt_version = version;
+
+	mhi_xprt_config->xprt_option = 0;
+
+	scnprintf(mhi_xprt_config->xprt_name, XPRT_NAME_LEN, "%s_IPCRTR",
+		  remote_ss);
+
+	return 0;
+error:
+	IPC_RTR_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
+
+/**
+ * ipc_router_mhi_dev_xprt_probe() - Probe an MHI xprt
+ * @pdev: Platform device corresponding to MHI xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to an MHI transport.
+ */
+static int ipc_router_mhi_dev_xprt_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct ipc_router_mhi_dev_xprt_config mhi_xprt_config;
+
+	if (pdev && pdev->dev.of_node) {
+		rc = parse_devicetree(pdev->dev.of_node, &mhi_xprt_config);
+		if (rc) {
+			IPC_RTR_ERR("%s: failed to parse device tree\n",
+				    __func__);
+			return rc;
+		}
+
+		rc = ipc_router_mhi_dev_config_init(&mhi_xprt_config,
+						    &pdev->dev);
+		if (rc) {
+			if (rc == -ENXIO)
+				return -EPROBE_DEFER;
+			IPC_RTR_ERR("%s: init failed\n", __func__);
+			return rc;
+		}
+	}
+	return rc;
+}
+
+static const struct of_device_id ipc_router_mhi_dev_xprt_match_table[] = {
+	{ .compatible = "qcom,ipc-router-mhi-dev-xprt" },
+	{},
+};
+
+static struct platform_driver ipc_router_mhi_dev_xprt_driver = {
+	.probe = ipc_router_mhi_dev_xprt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = ipc_router_mhi_dev_xprt_match_table,
+	},
+};
+module_platform_driver(ipc_router_mhi_dev_xprt_driver);
+
+MODULE_DESCRIPTION("IPC Router MHI_DEV XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 09c669e..038abc3 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -138,7 +138,7 @@
 	struct resource *res;
 	void __iomem *base;
 	struct gsbi_info *gsbi;
-	int i;
+	int i, ret;
 	u32 mask, gsbi_num;
 	const struct crci_config *config = NULL;
 
@@ -221,7 +221,10 @@
 
 	platform_set_drvdata(pdev, gsbi);
 
-	return of_platform_populate(node, NULL, NULL, &pdev->dev);
+	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+	if (ret)
+		clk_disable_unprepare(gsbi->hclk);
+	return ret;
 }
 
 static int gsbi_remove(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index b6214eb..c9489e3 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -642,6 +642,9 @@
 
 	/* QM215 ID */
 	[386] = {MSM_CPU_QM215, "QM215"},
+
+	/* SDM429W IDs*/
+	[416] = {MSM_CPU_SDM429W, "SDM429W"},
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -1629,6 +1632,10 @@
 		dummy_socinfo.id = 364;
 		strlcpy(dummy_socinfo.build_id, "sda429 - ",
 				sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sdm429w()) {
+		dummy_socinfo.id = 416;
+		strlcpy(dummy_socinfo.build_id, "sdm429w - ",
+				sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_mdm9607()) {
 		dummy_socinfo.id = 290;
 		strlcpy(dummy_socinfo.build_id, "mdm9607 - ",
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index de2c1bf..c4f5e5b 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -131,13 +131,17 @@
 	/* take over the memory region from the early initialization */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	fuse->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(fuse->base))
-		return PTR_ERR(fuse->base);
+	if (IS_ERR(fuse->base)) {
+		err = PTR_ERR(fuse->base);
+		fuse->base = base;
+		return err;
+	}
 
 	fuse->clk = devm_clk_get(&pdev->dev, "fuse");
 	if (IS_ERR(fuse->clk)) {
 		dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
 			PTR_ERR(fuse->clk));
+		fuse->base = base;
 		return PTR_ERR(fuse->clk);
 	}
 
@@ -146,8 +150,10 @@
 
 	if (fuse->soc->probe) {
 		err = fuse->soc->probe(fuse);
-		if (err < 0)
+		if (err < 0) {
+			fuse->base = base;
 			return err;
+		}
 	}
 
 	if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 9685f9b8..a12710c 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -512,16 +512,10 @@
  */
 int tegra_powergate_is_powered(unsigned int id)
 {
-	int status;
-
 	if (!tegra_powergate_is_valid(id))
 		return -EINVAL;
 
-	mutex_lock(&pmc->powergates_lock);
-	status = tegra_powergate_state(id);
-	mutex_unlock(&pmc->powergates_lock);
-
-	return status;
+	return tegra_powergate_state(id);
 }
 
 /**
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 3f3751e..f2209ec 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1668,6 +1668,7 @@
 			platform_info->enable_dma = false;
 		} else {
 			master->can_dma = pxa2xx_spi_can_dma;
+			master->max_dma_len = MAX_DMA_LEN;
 		}
 	}
 
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index caeac66..4cb72a8 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -457,8 +457,8 @@
 	ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
 	if (qspi->ctrl_base) {
 		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
-				   MEM_CS_EN(spi->chip_select),
-				   MEM_CS_MASK);
+				   MEM_CS_MASK,
+				   MEM_CS_EN(spi->chip_select));
 	}
 	qspi->mmap_enabled = true;
 }
@@ -470,7 +470,7 @@
 	ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
 	if (qspi->ctrl_base)
 		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
-				   0, MEM_CS_MASK);
+				   MEM_CS_MASK, 0);
 	qspi->mmap_enabled = false;
 }
 
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index b23f2c7..1991626 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -38,11 +38,6 @@
 			.name	= "system",
 		},
 		{
-			.id	= ION_HEAP_TYPE_SYSTEM_CONTIG,
-			.type	= ION_HEAP_TYPE_SYSTEM_CONTIG,
-			.name	= "system contig",
-		},
-		{
 			.id	= ION_HEAP_TYPE_CARVEOUT,
 			.type	= ION_HEAP_TYPE_CARVEOUT,
 			.name	= "carveout",
@@ -76,34 +71,30 @@
 		return -ENOMEM;
 
 
-	/* Allocate a dummy carveout heap */
-	carveout_ptr = alloc_pages_exact(
-				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
-				GFP_KERNEL);
-	if (carveout_ptr)
-		dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
-						virt_to_phys(carveout_ptr);
-	else
-		pr_err("ion_dummy: Could not allocate carveout\n");
-
-	/* Allocate a dummy chunk heap */
-	chunk_ptr = alloc_pages_exact(
-				dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
-				GFP_KERNEL);
-	if (chunk_ptr)
-		dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
-	else
-		pr_err("ion_dummy: Could not allocate chunk\n");
-
 	for (i = 0; i < dummy_ion_pdata.nr; i++) {
 		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
 
-		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
-		    !heap_data->base)
-			continue;
+		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT) {
+			/* Allocate a dummy carveout heap */
+			carveout_ptr = alloc_pages_exact(heap_data->size,
+							 GFP_KERNEL);
+			if (!carveout_ptr) {
+				pr_err("ion_dummy: Could not allocate carveout\n");
+				continue;
+			}
+			heap_data->base = virt_to_phys(carveout_ptr);
+		}
 
-		if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
-			continue;
+		if (heap_data->type == ION_HEAP_TYPE_CHUNK) {
+			/* Allocate a dummy chunk heap */
+			chunk_ptr = alloc_pages_exact(heap_data->size,
+							GFP_KERNEL);
+			if (!chunk_ptr) {
+				pr_err("ion_dummy: Could not allocate chunk\n");
+				continue;
+			}
+			heap_data->base = virt_to_phys(chunk_ptr);
+		}
 
 		heaps[i] = ion_heap_create(heap_data);
 		if (IS_ERR_OR_NULL(heaps[i])) {
@@ -114,20 +105,28 @@
 	}
 	return 0;
 err:
-	for (i = 0; i < dummy_ion_pdata.nr; ++i)
-		ion_heap_destroy(heaps[i]);
+	for (i = 0; i < dummy_ion_pdata.nr; ++i) {
+		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
+
+		if (!IS_ERR_OR_NULL(heaps[i]))
+			ion_heap_destroy(heaps[i]);
+
+		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT) {
+			if (carveout_ptr) {
+				free_pages_exact(carveout_ptr,
+						 heap_data->size);
+			}
+			carveout_ptr = NULL;
+		}
+		if (heap_data->type == ION_HEAP_TYPE_CHUNK) {
+			if (chunk_ptr) {
+				free_pages_exact(chunk_ptr, heap_data->size);
+			}
+			chunk_ptr = NULL;
+		}
+	}
 	kfree(heaps);
 
-	if (carveout_ptr) {
-		free_pages_exact(carveout_ptr,
-				 dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
-		carveout_ptr = NULL;
-	}
-	if (chunk_ptr) {
-		free_pages_exact(chunk_ptr,
-				 dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
-		chunk_ptr = NULL;
-	}
 	return err;
 }
 device_initcall(ion_dummy_init);
@@ -138,19 +137,26 @@
 
 	ion_device_destroy(idev);
 
-	for (i = 0; i < dummy_ion_pdata.nr; i++)
-		ion_heap_destroy(heaps[i]);
-	kfree(heaps);
+	for (i = 0; i < dummy_ion_pdata.nr; ++i) {
+		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
 
-	if (carveout_ptr) {
-		free_pages_exact(carveout_ptr,
-				 dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
-		carveout_ptr = NULL;
+		if (!IS_ERR_OR_NULL(heaps[i]))
+			ion_heap_destroy(heaps[i]);
+
+		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT) {
+			if (carveout_ptr) {
+				free_pages_exact(carveout_ptr,
+						 heap_data->size);
+			}
+			carveout_ptr = NULL;
+		}
+		if (heap_data->type == ION_HEAP_TYPE_CHUNK) {
+			if (chunk_ptr) {
+				free_pages_exact(chunk_ptr, heap_data->size);
+			}
+			chunk_ptr = NULL;
+		}
 	}
-	if (chunk_ptr) {
-		free_pages_exact(chunk_ptr,
-				 dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
-		chunk_ptr = NULL;
-	}
+	kfree(heaps);
 }
 __exitcall(ion_dummy_exit);
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 027094f..b62bff7 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -59,6 +59,10 @@
 #else
 #define _ZONE ZONE_NORMAL
 #endif
+#include <linux/circ_buf.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace/lowmemorykiller.h"
@@ -94,6 +98,157 @@
 			pr_info(x);			\
 	} while (0)
 
+
+static DECLARE_WAIT_QUEUE_HEAD(event_wait);
+static DEFINE_SPINLOCK(lmk_event_lock);
+static struct circ_buf event_buffer;
+#define MAX_BUFFERED_EVENTS 8
+#define MAX_TASKNAME 128
+
+struct lmk_event {
+	char taskname[MAX_TASKNAME];
+	pid_t pid;
+	uid_t uid;
+	pid_t group_leader_pid;
+	unsigned long min_flt;
+	unsigned long maj_flt;
+	unsigned long rss_in_pages;
+	short oom_score_adj;
+	short min_score_adj;
+	unsigned long long start_time;
+	struct list_head list;
+};
+
+void handle_lmk_event(struct task_struct *selected, short min_score_adj)
+{
+	int head;
+	int tail;
+	struct lmk_event *events;
+	struct lmk_event *event;
+	int res;
+	long rss_in_pages = -1;
+	struct mm_struct *mm = get_task_mm(selected);
+
+	if (mm) {
+		rss_in_pages = get_mm_rss(mm);
+		mmput(mm);
+	}
+
+	spin_lock(&lmk_event_lock);
+
+	head = event_buffer.head;
+	tail = READ_ONCE(event_buffer.tail);
+
+	/* Do not continue to log if no space remains in the buffer. */
+	if (CIRC_SPACE(head, tail, MAX_BUFFERED_EVENTS) < 1) {
+		spin_unlock(&lmk_event_lock);
+		return;
+	}
+
+	events = (struct lmk_event *) event_buffer.buf;
+	event = &events[head];
+
+	res = get_cmdline(selected, event->taskname, MAX_TASKNAME - 1);
+
+	/* No valid process name means this is definitely not associated with a
+	 * userspace activity.
+	 */
+
+	if (res <= 0 || res >= MAX_TASKNAME) {
+		spin_unlock(&lmk_event_lock);
+		return;
+	}
+
+	event->taskname[res] = '\0';
+	event->pid = selected->pid;
+	event->uid = from_kuid_munged(current_user_ns(), task_uid(selected));
+	if (selected->group_leader)
+		event->group_leader_pid = selected->group_leader->pid;
+	else
+		event->group_leader_pid = -1;
+	event->min_flt = selected->min_flt;
+	event->maj_flt = selected->maj_flt;
+	event->oom_score_adj = selected->signal->oom_score_adj;
+	event->start_time = nsec_to_clock_t(selected->real_start_time);
+	event->rss_in_pages = rss_in_pages;
+	event->min_score_adj = min_score_adj;
+
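+	/* MAX_BUFFERED_EVENTS is a power of two; masking wraps the index */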
+	event_buffer.head = (head + 1) & (MAX_BUFFERED_EVENTS - 1);
+
+	spin_unlock(&lmk_event_lock);
+
+	wake_up_interruptible(&event_wait);
+}
+
+static int lmk_event_show(struct seq_file *s, void *unused)
+{
+	struct lmk_event *events = (struct lmk_event *) event_buffer.buf;
+	int head;
+	int tail;
+	struct lmk_event *event;
+
+	spin_lock(&lmk_event_lock);
+
+	head = event_buffer.head;
+	tail = event_buffer.tail;
+
+	if (head == tail) {
+		spin_unlock(&lmk_event_lock);
+		return -EAGAIN;
+	}
+
+	event = &events[tail];
+
+	seq_printf(s, "%lu %lu %lu %lu %lu %lu %hd %hd %llu\n%s\n",
+		(unsigned long) event->pid, (unsigned long) event->uid,
+		(unsigned long) event->group_leader_pid, event->min_flt,
+		event->maj_flt, event->rss_in_pages, event->oom_score_adj,
+		event->min_score_adj, event->start_time, event->taskname);
+
+	event_buffer.tail = (tail + 1) & (MAX_BUFFERED_EVENTS - 1);
+
+	spin_unlock(&lmk_event_lock);
+	return 0;
+}
+
+static unsigned int lmk_event_poll(struct file *file, poll_table *wait)
+{
+	int ret = 0;
+
+	poll_wait(file, &event_wait, wait);
+	spin_lock(&lmk_event_lock);
+	if (event_buffer.head != event_buffer.tail)
+		ret = POLLIN;
+	spin_unlock(&lmk_event_lock);
+	return ret;
+}
+
+static int lmk_event_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lmk_event_show, inode->i_private);
+}
+
+static const struct file_operations event_file_ops = {
+	.open = lmk_event_open,
+	.poll = lmk_event_poll,
+	.read = seq_read
+};
+
+static void lmk_event_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	event_buffer.head = 0;
+	event_buffer.tail = 0;
+	event_buffer.buf = kmalloc_array(MAX_BUFFERED_EVENTS,
+					 sizeof(struct lmk_event), GFP_KERNEL);
+	if (!event_buffer.buf)
+		return;
+	entry = proc_create("lowmemorykiller", 0, NULL, &event_file_ops);
+	if (!entry)
+		pr_err("error creating kernel lmk event file\n");
+}
+
 static unsigned long lowmem_count(struct shrinker *s,
 				  struct shrink_control *sc)
 {
@@ -630,6 +785,7 @@
 
 		lowmem_deathpending_timeout = jiffies + HZ;
 		rem += selected_tasksize;
+		handle_lmk_event(selected, min_score_adj);
 		rcu_read_unlock();
 		/* give the system time to free up the memory */
 		msleep_interruptible(20);
@@ -657,6 +813,7 @@
 {
 	register_shrinker(&lowmem_shrinker);
 	vmpressure_notifier_register(&lmk_vmpr_nb);
+	lmk_event_init();
 	return 0;
 }
 device_initcall(lowmem_init);
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index dcb6376..35432fb 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -984,6 +984,8 @@
 			   unsigned int mask);
 unsigned int comedi_dio_update_state(struct comedi_subdevice *,
 				     unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+				       struct comedi_cmd *cmd);
 unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
 unsigned int comedi_nscans_left(struct comedi_subdevice *s,
 				unsigned int nscans);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 1736248..8ca5493 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -390,11 +390,13 @@
 EXPORT_SYMBOL_GPL(comedi_dio_update_state);
 
 /**
- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
+ * bytes
  * @s: COMEDI subdevice.
+ * @cmd: COMEDI command.
  *
  * Determines the overall scan length according to the subdevice type and the
- * number of channels in the scan.
+ * number of channels in the scan for the specified command.
  *
  * For digital input, output or input/output subdevices, samples for
  * multiple channels are assumed to be packed into one or more unsigned
@@ -404,9 +406,9 @@
  *
  * Returns the overall scan length in bytes.
  */
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+				       struct comedi_cmd *cmd)
 {
-	struct comedi_cmd *cmd = &s->async->cmd;
 	unsigned int num_samples;
 	unsigned int bits_per_sample;
 
@@ -423,6 +425,29 @@
 	}
 	return comedi_samples_to_bytes(s, num_samples);
 }
+EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+
+/**
+ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * @s: COMEDI subdevice.
+ *
+ * Determines the overall scan length according to the subdevice type and the
+ * number of channels in the scan for the current command.
+ *
+ * For digital input, output or input/output subdevices, samples for
+ * multiple channels are assumed to be packed into one or more unsigned
+ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
+ * flag.  For other types of subdevice, samples are assumed to occupy a
+ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
+ *
+ * Returns the overall scan length in bytes.
+ */
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+	struct comedi_cmd *cmd = &s->async->cmd;
+
+	return comedi_bytes_per_scan_cmd(s, cmd);
+}
 EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
 
 static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 0fa85d5..fe03a41 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3477,6 +3477,7 @@
 static int ni_cdio_cmdtest(struct comedi_device *dev,
 			   struct comedi_subdevice *s, struct comedi_cmd *cmd)
 {
+	unsigned int bytes_per_scan;
 	int err = 0;
 	int tmp;
 
@@ -3506,9 +3507,12 @@
 	err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
 	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
 					   cmd->chanlist_len);
-	err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
-					    s->async->prealloc_bufsz /
-					    comedi_bytes_per_scan(s));
+	bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
+	if (bytes_per_scan) {
+		err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+						    s->async->prealloc_bufsz /
+						    bytes_per_scan);
+	}
 
 	if (err)
 		return 3;
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 5036eeb..2f174a3 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -472,10 +472,8 @@
 
 	size = usb_endpoint_maxp(devpriv->ep_tx);
 	devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-	if (!devpriv->usb_tx_buf) {
-		kfree(devpriv->usb_rx_buf);
+	if (!devpriv->usb_tx_buf)
 		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -527,6 +525,9 @@
 	if (!devpriv)
 		return -ENOMEM;
 
+	mutex_init(&devpriv->mut);
+	usb_set_intfdata(intf, devpriv);
+
 	ret = ni6501_find_endpoints(dev);
 	if (ret)
 		return ret;
@@ -535,9 +536,6 @@
 	if (ret)
 		return ret;
 
-	mutex_init(&devpriv->mut);
-	usb_set_intfdata(intf, devpriv);
-
 	ret = comedi_alloc_subdevices(dev, 2);
 	if (ret)
 		return ret;
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index a004aed..1800eb3 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -691,10 +691,8 @@
 
 	size = usb_endpoint_maxp(devpriv->ep_tx);
 	devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-	if (!devpriv->usb_tx_buf) {
-		kfree(devpriv->usb_rx_buf);
+	if (!devpriv->usb_tx_buf)
 		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -809,6 +807,8 @@
 
 	devpriv->model = board->model;
 
+	sema_init(&devpriv->limit_sem, 8);
+
 	ret = vmk80xx_find_usb_endpoints(dev);
 	if (ret)
 		return ret;
@@ -817,8 +817,6 @@
 	if (ret)
 		return ret;
 
-	sema_init(&devpriv->limit_sem, 8);
-
 	usb_set_intfdata(intf, devpriv);
 
 	if (devpriv->model == VMK8055_MODEL)
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
index e85c988..adea629 100644
--- a/drivers/staging/greybus/power_supply.c
+++ b/drivers/staging/greybus/power_supply.c
@@ -521,7 +521,7 @@
 
 	op = gb_operation_create(connection,
 				 GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS,
-				 sizeof(req), sizeof(*resp) + props_count *
+				 sizeof(*req), sizeof(*resp) + props_count *
 				 sizeof(struct gb_power_supply_props_desc),
 				 GFP_KERNEL);
 	if (!op)
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 4dc9ca3..b82a4ab 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -109,10 +109,10 @@
 #define AD7192_CH_AIN3		BIT(6) /* AIN3 - AINCOM */
 #define AD7192_CH_AIN4		BIT(7) /* AIN4 - AINCOM */
 
-#define AD7193_CH_AIN1P_AIN2M	0x000  /* AIN1(+) - AIN2(-) */
-#define AD7193_CH_AIN3P_AIN4M	0x001  /* AIN3(+) - AIN4(-) */
-#define AD7193_CH_AIN5P_AIN6M	0x002  /* AIN5(+) - AIN6(-) */
-#define AD7193_CH_AIN7P_AIN8M	0x004  /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_AIN1P_AIN2M	0x001  /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M	0x002  /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M	0x004  /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M	0x008  /* AIN7(+) - AIN8(-) */
 #define AD7193_CH_TEMP		0x100 /* Temp senseor */
 #define AD7193_CH_AIN2P_AIN2M	0x200 /* AIN2(+) - AIN2(-) */
 #define AD7193_CH_AIN1		0x401 /* AIN1 - AINCOM */
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 3faffe5..95f5be1 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -47,6 +47,8 @@
 #define ADT7516_MSB_AIN3		0xA
 #define ADT7516_MSB_AIN4		0xB
 #define ADT7316_DA_DATA_BASE		0x10
+#define ADT7316_DA_10_BIT_LSB_SHIFT	6
+#define ADT7316_DA_12_BIT_LSB_SHIFT	4
 #define ADT7316_DA_MSB_DATA_REGS	4
 #define ADT7316_LSB_DAC_A		0x10
 #define ADT7316_MSB_DAC_A		0x11
@@ -1089,7 +1091,7 @@
 		ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK);
 		if (data & 0x1)
 			ldac_config |= ADT7516_DAC_AB_IN_VREF;
-		else if (data & 0x2)
+		if (data & 0x2)
 			ldac_config |= ADT7516_DAC_CD_IN_VREF;
 	} else {
 		ret = kstrtou8(buf, 16, &data);
@@ -1411,7 +1413,7 @@
 static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
 		int channel, char *buf)
 {
-	u16 data;
+	u16 data = 0;
 	u8 msb, lsb, offset;
 	int ret;
 
@@ -1436,7 +1438,11 @@
 	if (ret)
 		return -EIO;
 
-	data = (msb << offset) + (lsb & ((1 << offset) - 1));
+	if (chip->dac_bits == 12)
+		data = lsb >> ADT7316_DA_12_BIT_LSB_SHIFT;
+	else if (chip->dac_bits == 10)
+		data = lsb >> ADT7316_DA_10_BIT_LSB_SHIFT;
+	data |= msb << offset;
 
 	return sprintf(buf, "%d\n", data);
 }
@@ -1444,7 +1450,7 @@
 static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
 		int channel, const char *buf, size_t len)
 {
-	u8 msb, lsb, offset;
+	u8 msb, lsb, lsb_reg, offset;
 	u16 data;
 	int ret;
 
@@ -1462,9 +1468,13 @@
 		return -EINVAL;
 
 	if (chip->dac_bits > 8) {
-		lsb = data & (1 << offset);
+		lsb = data & ((1 << offset) - 1);
+		if (chip->dac_bits == 12)
+			lsb_reg = lsb << ADT7316_DA_12_BIT_LSB_SHIFT;
+		else
+			lsb_reg = lsb << ADT7316_DA_10_BIT_LSB_SHIFT;
 		ret = chip->bus.write(chip->bus.client,
-			ADT7316_DA_DATA_BASE + channel * 2, lsb);
+			ADT7316_DA_DATA_BASE + channel * 2, lsb_reg);
 		if (ret)
 			return -EIO;
 	}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 9f61583..41b667c 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -158,17 +158,9 @@
 
 static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
 {
-	u32 val;
-	void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj	*pcmd);
 	struct cmd_obj *pcmd  = (struct cmd_obj *)pbuf;
 
-	if (pcmd->rsp && pcmd->rspsz > 0)
-		memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
-	pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
-	if (!pcmd_callback)
-		r8712_free_cmd_obj(pcmd);
-	else
-		pcmd_callback(padapter, pcmd);
+	r8712_free_cmd_obj(pcmd);
 	return H2C_SUCCESS;
 }
 
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 67e9e91..d10a59d 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -152,7 +152,7 @@
 static struct _cmd_callback	cmd_callback[] = {
 	{GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
 	{GEN_CMD_CODE(_Write_MACREG), NULL},
-	{GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
+	{GEN_CMD_CODE(_Read_BBREG), NULL},
 	{GEN_CMD_CODE(_Write_BBREG), NULL},
 	{GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
 	{GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index ab96629..22e5116 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -977,8 +977,6 @@
 		return;
 	}
 
-	MACvIntDisable(priv->PortOffset);
-
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Read low level stats */
@@ -1067,8 +1065,6 @@
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
-
-	MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
 }
 
 static void vnt_interrupt_work(struct work_struct *work)
@@ -1078,14 +1074,17 @@
 
 	if (priv->vif)
 		vnt_interrupt_process(priv);
+
+	MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
 }
 
 static irqreturn_t vnt_interrupt(int irq,  void *arg)
 {
 	struct vnt_private *priv = arg;
 
-	if (priv->vif)
-		schedule_work(&priv->interrupt_work);
+	schedule_work(&priv->interrupt_work);
+
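+	/* Keep the device IRQ masked until the work handler re-enables it */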
+	MACvIntDisable(priv->PortOffset);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 80205f3..b6c4f55 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4084,9 +4084,9 @@
 		struct se_cmd *se_cmd = &cmd->se_cmd;
 
 		if (se_cmd->se_tfo != NULL) {
-			spin_lock(&se_cmd->t_state_lock);
+			spin_lock_irq(&se_cmd->t_state_lock);
 			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
-			spin_unlock(&se_cmd->t_state_lock);
+			spin_unlock_irq(&se_cmd->t_state_lock);
 		}
 	}
 	spin_unlock_bh(&conn->cmd_lock);
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 5836e55..d4c374c 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -20,6 +20,13 @@
 	INT3400_THERMAL_PASSIVE_1,
 	INT3400_THERMAL_ACTIVE,
 	INT3400_THERMAL_CRITICAL,
+	INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
+	INT3400_THERMAL_EMERGENCY_CALL_MODE,
+	INT3400_THERMAL_PASSIVE_2,
+	INT3400_THERMAL_POWER_BOSS,
+	INT3400_THERMAL_VIRTUAL_SENSOR,
+	INT3400_THERMAL_COOLING_MODE,
+	INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
 	INT3400_THERMAL_MAXIMUM_UUID,
 };
 
@@ -27,6 +34,13 @@
 	"42A441D6-AE6A-462b-A84B-4A8CE79027D3",
 	"3A95C389-E4B8-4629-A526-C52C88626BAE",
 	"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
+	"63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
+	"5349962F-71E6-431D-9AE8-0A635B710AEE",
+	"9E04115A-AE87-4D1C-9500-0F3E340BFE75",
+	"F5A35014-C209-46A4-993A-EB56DE7530A1",
+	"6ED722A7-9240-48A5-B479-31EEF723D7CF",
+	"16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
+	"BE84BABF-C4D4-403D-B495-3128FD44dAC1",
 };
 
 struct int3400_thermal_priv {
@@ -271,10 +285,9 @@
 
 	platform_set_drvdata(pdev, priv);
 
-	if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
-		int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
-		int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
-	}
+	int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
+	int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
+
 	priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
 						priv, &int3400_thermal_ops,
 						&int3400_thermal_params, 0, 0);
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index b2bbaa1..1878810 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -43,7 +43,7 @@
 }
 
 static const struct x86_cpu_id soc_thermal_ids[] = {
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0,
 		BYT_SOC_DTS_APIC_IRQ},
 	{}
 };
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
old mode 100644
new mode 100755
index 53e3186..b5b3bfa
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -475,4 +475,27 @@
 	help
 	  Console support for OKL4 Microvisor virtual ttys.
 
+config LDISC_AUTOLOAD
+	bool "Automatically load TTY Line Disciplines"
+	default y
+	help
+	  Historically the kernel has always automatically loaded any
+	  line discipline that is in a kernel module when a user asks
+	  for it to be loaded with the TIOCSETD ioctl, or through other
+	  means.  This is not always the best thing to do on systems
+	  where you know you will not be using some of the more
+	  "ancient" line disciplines, so prevent the kernel from doing
+	  this unless the request is coming from a process with the
+	  CAP_SYS_MODULE capability.
+
+	  Say 'Y' here if you trust your userspace users to do the right
+	  thing, or if you have only provided the line disciplines that
+	  you know you will be using, or if you wish to continue to use
+	  the traditional method of on-demand loading of these modules
+	  by any user.
+
+	  This functionality can be changed at runtime with the
+	  dev.tty.ldisc_autoload sysctl; this configuration option only
+	  sets its default value.
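+
+	  For example, "sysctl -w dev.tty.ldisc_autoload=0" disables the
+	  automatic loading at runtime.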
+
 endif # TTY
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 7a8b5fc..f89dfde 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -97,6 +97,10 @@
 	if (of_property_read_u32(np, "reg-offset", &prop) == 0)
 		port->mapbase += prop;
 
+	/* Compatibility with the deprecated pxa driver and 8250_pxa drivers. */
+	if (of_device_is_compatible(np, "mrvl,mmp-uart"))
+		port->regshift = 2;
+
 	/* Check for registers offset within the devices address range */
 	if (of_property_read_u32(np, "reg-shift", &prop) == 0)
 		port->regshift = prop;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e82b347..2c38b3a 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1330,6 +1330,30 @@
 	return setup_port(priv, port, bar, offset, board->reg_shift);
 }
 
+static int pci_pericom_setup(struct serial_private *priv,
+		  const struct pciserial_board *board,
+		  struct uart_8250_port *port, int idx)
+{
+	unsigned int bar, offset = board->first_offset, maxnr;
+
+	bar = FL_GET_BASE(board->flags);
+	if (board->flags & FL_BASE_BARS)
+		bar += idx;
+	else
+		offset += idx * board->uart_offset;
+
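+	/* Port 4 of the PI7C9X7954 sits at a fixed register offset */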
+	if (idx == 3)
+		offset = 0x38;
+
+	maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
+		(board->reg_shift + 3);
+
+	if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
+		return 1;
+
+	return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
 static int
 ce4100_serial_setup(struct serial_private *priv,
 		  const struct pciserial_board *board,
@@ -2097,6 +2121,16 @@
 		.exit		= pci_plx9050_exit,
 	},
 	/*
+	 * Pericom (only the 7954 - it has an offset jump for port 4)
+	 */
+	{
+		.vendor		= PCI_VENDOR_ID_PERICOM,
+		.device		= PCI_DEVICE_ID_PERICOM_PI7C9X7954,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.setup		= pci_pericom_setup,
+	},
+	/*
 	 * PLX
 	 */
 	{
@@ -2126,6 +2160,111 @@
 		.setup		= pci_default_setup,
 		.exit		= pci_plx9050_exit,
 	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
+	{
+		.vendor     = PCI_VENDOR_ID_ACCESIO,
+		.device     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
+		.subvendor  = PCI_ANY_ID,
+		.subdevice  = PCI_ANY_ID,
+		.setup      = pci_pericom_setup,
+	},
 	/*
 	 * SBS Technologies, Inc., PMC-OCTALPRO 232
 	 */
@@ -4976,10 +5115,10 @@
 	 */
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_pericom_PI7C9X7954 },
@@ -4988,10 +5127,10 @@
 		pbn_pericom_PI7C9X7954 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_pericom_PI7C9X7954 },
@@ -5000,10 +5139,10 @@
 		pbn_pericom_PI7C9X7954 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_pericom_PI7C9X7954 },
@@ -5012,13 +5151,13 @@
 		pbn_pericom_PI7C9X7954 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7951 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_pericom_PI7C9X7954 },
@@ -5027,16 +5166,16 @@
 		pbn_pericom_PI7C9X7954 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_pericom_PI7C9X7954 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_pericom_PI7C9X7954 },
@@ -5045,13 +5184,13 @@
 		pbn_pericom_PI7C9X7954 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7954 },
+		pbn_pericom_PI7C9X7952 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7958 },
+		pbn_pericom_PI7C9X7954 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7958 },
+		pbn_pericom_PI7C9X7954 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_pericom_PI7C9X7958 },
@@ -5060,19 +5199,19 @@
 		pbn_pericom_PI7C9X7958 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7958 },
+		pbn_pericom_PI7C9X7954 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_pericom_PI7C9X7958 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7958 },
+		pbn_pericom_PI7C9X7954 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		pbn_pericom_PI7C9X7958 },
 	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		pbn_pericom_PI7C9X7958 },
+		pbn_pericom_PI7C9X7954 },
 	/*
 	 * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
 	 */
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 73137f4..d446251 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -52,11 +52,6 @@
 	struct clk		*clk;
 };
 
-static inline bool ar933x_uart_console_enabled(void)
-{
-	return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
-}
-
 static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
 					    int offset)
 {
@@ -511,6 +506,7 @@
 	.verify_port	= ar933x_uart_verify_port,
 };
 
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
 static struct ar933x_uart_port *
 ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
 
@@ -607,14 +603,7 @@
 	.index		= -1,
 	.data		= &ar933x_uart_driver,
 };
-
-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
-{
-	if (!ar933x_uart_console_enabled())
-		return;
-
-	ar933x_console_ports[up->port.line] = up;
-}
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
 
 static struct uart_driver ar933x_uart_driver = {
 	.owner		= THIS_MODULE,
@@ -703,7 +692,9 @@
 	baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
 	up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
 
-	ar933x_uart_add_console_port(up);
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+	ar933x_console_ports[up->port.line] = up;
+#endif
 
 	ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
 	if (ret)
@@ -752,8 +743,9 @@
 {
 	int ret;
 
-	if (ar933x_uart_console_enabled())
-		ar933x_uart_driver.cons = &ar933x_uart_console;
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+	ar933x_uart_driver.cons = &ar933x_uart_console;
+#endif
 
 	ret = uart_register_driver(&ar933x_uart_driver);
 	if (ret)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 5a341b1..ef688aa 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -175,6 +175,8 @@
 	unsigned int		pending_status;
 	spinlock_t		lock_suspended;
 
+	bool			hd_start_rx;	/* can start RX during half-duplex operation */
+
 	int (*prepare_rx)(struct uart_port *port);
 	int (*prepare_tx)(struct uart_port *port);
 	void (*schedule_rx)(struct uart_port *port);
@@ -241,6 +243,12 @@
 
 #endif
 
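+/*
+ * RS485 half-duplex: RX must stay off while TX is in progress and may
+ * only be (re)started once the transmitter has fully drained.
+ */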
+static inline int atmel_uart_is_half_duplex(struct uart_port *port)
+{
+	return (port->rs485.flags & SER_RS485_ENABLED) &&
+		!(port->rs485.flags & SER_RS485_RX_DURING_TX);
+}
+
 #ifdef CONFIG_SERIAL_ATMEL_PDC
 static bool atmel_use_pdc_rx(struct uart_port *port)
 {
@@ -492,9 +500,9 @@
 	/* Disable interrupts */
 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
 
-	if ((port->rs485.flags & SER_RS485_ENABLED) &&
-	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+	if (atmel_uart_is_half_duplex(port))
 		atmel_start_rx(port);
+
 }
 
 /*
@@ -511,8 +519,7 @@
 		return;
 
 	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
-		if ((port->rs485.flags & SER_RS485_ENABLED) &&
-		    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+		if (atmel_uart_is_half_duplex(port))
 			atmel_stop_rx(port);
 
 	if (atmel_use_pdc_tx(port))
@@ -809,10 +816,14 @@
 	 */
 	if (!uart_circ_empty(xmit))
 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
-	else if ((port->rs485.flags & SER_RS485_ENABLED) &&
-		 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
-		/* DMA done, stop TX, start RX for RS485 */
-		atmel_start_rx(port);
+	else if (atmel_uart_is_half_duplex(port)) {
+		/*
+		 * DMA done, re-enable TXEMPTY and signal that we can stop
+		 * TX and start RX for RS485
+		 */
+		atmel_port->hd_start_rx = true;
+		atmel_uart_writel(port, ATMEL_US_IER,
+				  atmel_port->tx_done_mask);
 	}
 
 	spin_unlock_irqrestore(&port->lock, flags);
@@ -1166,6 +1177,10 @@
 					 sg_dma_len(&atmel_port->sg_rx)/2,
 					 DMA_DEV_TO_MEM,
 					 DMA_PREP_INTERRUPT);
+	if (!desc) {
+		dev_err(port->dev, "Failed to prepare cyclic DMA descriptor\n");
+		goto chan_err;
+	}
 	desc->callback = atmel_complete_rx_dma;
 	desc->callback_param = port;
 	atmel_port->desc_rx = desc;
@@ -1253,9 +1268,20 @@
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
 	if (pending & atmel_port->tx_done_mask) {
-		/* Either PDC or interrupt transmission */
 		atmel_uart_writel(port, ATMEL_US_IDR,
 				  atmel_port->tx_done_mask);
+
+		/* Start RX if flag was set and FIFO is empty */
+		if (atmel_port->hd_start_rx) {
+			if (!(atmel_uart_readl(port, ATMEL_US_CSR)
+					& ATMEL_US_TXEMPTY))
+				dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
+
+			atmel_port->hd_start_rx = false;
+			atmel_start_rx(port);
+			return;
+		}
+
 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
 	}
 }
@@ -1382,8 +1408,7 @@
 		atmel_uart_writel(port, ATMEL_US_IER,
 				  atmel_port->tx_done_mask);
 	} else {
-		if ((port->rs485.flags & SER_RS485_ENABLED) &&
-		    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+		if (atmel_uart_is_half_duplex(port)) {
 			/* DMA done, stop TX, start RX for RS485 */
 			atmel_start_rx(port);
 		}
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 6c0c6c5d..ec24e82 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -148,8 +148,10 @@
 	char *cptr = config;
 	struct console *cons;
 
-	if (!strlen(config) || isspace(config[0]))
+	if (!strlen(config) || isspace(config[0])) {
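+		/* An empty config string means "unconfigure"; report success. */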
+		err = 0;
 		goto noconfig;
+	}
 
 	kgdboc_io_ops.is_console = 0;
 	kgdb_tty_driver = NULL;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 8a3e926..5331baf 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1323,6 +1323,8 @@
 	if (spi->dev.of_node) {
 		const struct of_device_id *of_id =
 			of_match_device(max310x_dt_ids, &spi->dev);
+		if (!of_id)
+			return -ENODEV;
 
 		devtype = (struct max310x_devtype *)of_id->data;
 	} else {
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 7d80b8a..c91a47b 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -130,6 +130,15 @@
 
 #define DMA_RX_BUF_SIZE		(2048)
 #define UART_CONSOLE_RX_WM	(2)
+
+struct msm_geni_serial_ver_info {
+	int hw_major_ver;
+	int hw_minor_ver;
+	int hw_step_ver;
+	int m_fw_ver;
+	int s_fw_ver;
+};
+
 struct msm_geni_serial_port {
 	struct uart_port uport;
 	char name[20];
@@ -166,6 +175,7 @@
 	int ioctl_count;
 	int edge_count;
 	bool manual_flow;
+	struct msm_geni_serial_ver_info ver_info;
 };
 
 static const struct uart_ops msm_geni_serial_pops;
@@ -189,6 +199,7 @@
 static void msm_geni_serial_stop_rx(struct uart_port *uport);
 static int msm_geni_serial_runtime_resume(struct device *dev);
 static int msm_geni_serial_runtime_suspend(struct device *dev);
+static int msm_geni_serial_get_ver_info(struct uart_port *uport);
 
 static atomic_t uart_line_id = ATOMIC_INIT(0);
 
@@ -319,7 +330,7 @@
 static int vote_clock_on(struct uart_port *uport)
 {
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
-	int usage_count = atomic_read(&uport->dev->power.usage_count);
+	int usage_count;
 	int ret = 0;
 
 	ret = msm_geni_serial_power_on(uport);
@@ -328,15 +339,18 @@
 		return ret;
 	}
 	port->ioctl_count++;
-	IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d usage_count %d\n",
-		__func__, current->comm, port->ioctl_count, usage_count);
+	usage_count = atomic_read(&uport->dev->power.usage_count);
+	IPC_LOG_MSG(port->ipc_log_pwr,
+		"%s:%s ioctl:%d usage_count:%d edge_count:%d\n",
+		__func__, current->comm, port->ioctl_count,
+		usage_count, port->edge_count);
 	return 0;
 }
 
 static int vote_clock_off(struct uart_port *uport)
 {
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
-	int usage_count = atomic_read(&uport->dev->power.usage_count);
+	int usage_count;
 
 	if (!pm_runtime_enabled(uport->dev)) {
 		dev_err(uport->dev, "RPM not available. Can't enable clocks\n");
@@ -353,7 +367,8 @@
 	wait_for_transfers_inflight(uport);
 	port->ioctl_count--;
 	msm_geni_serial_power_off(uport);
-	IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d usage_count %d\n",
+	usage_count = atomic_read(&uport->dev->power.usage_count);
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s:%s ioctl:%d usage_count:%d\n",
 		__func__, current->comm, port->ioctl_count, usage_count);
 	return 0;
 };
@@ -388,7 +403,8 @@
 
 	if (!uart_console(uport) && device_pending_suspend(uport)) {
 		IPC_LOG_MSG(port->ipc_log_misc,
-				"%s.Device is suspended.\n", __func__);
+				"%s: Device is suspended, %s\n",
+				__func__, current->comm);
 		return;
 	}
 
@@ -411,9 +427,14 @@
 {
 	u32 geni_ios = 0;
 	unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
+	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
-	if (device_pending_suspend(uport))
+	if (!uart_console(uport) && device_pending_suspend(uport)) {
+		IPC_LOG_MSG(port->ipc_log_misc,
+				"%s: Device is suspended, %s\n",
+				__func__, current->comm);
 		return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+	}
 
 	geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
 	if (!(geni_ios & IO2_DATA_IN))
@@ -435,7 +456,8 @@
 
 	if (device_pending_suspend(uport)) {
 		IPC_LOG_MSG(port->ipc_log_misc,
-				"%s.Device is suspended.\n", __func__);
+			"%s: Device is suspended, %s: mctrl=0x%x\n",
+			 __func__, current->comm, mctrl);
 		return;
 	}
 	if (!(mctrl & TIOCM_RTS)) {
@@ -448,8 +470,10 @@
 							SE_UART_MANUAL_RFR);
 	/* Write to flow control must complete before return to client*/
 	mb();
-	IPC_LOG_MSG(port->ipc_log_misc, "%s: Manual_rfr 0x%x\n",
-						__func__, uart_manual_rfr);
+	IPC_LOG_MSG(port->ipc_log_misc,
+			"%s:%s, mctrl=0x%x, manual_rfr=0x%x, flow=%s\n",
+			__func__, current->comm, mctrl, uart_manual_rfr,
+			(port->manual_flow ? "OFF" : "ON"));
 }
 
 static const char *msm_geni_serial_get_type(struct uart_port *uport)
@@ -879,6 +903,7 @@
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 	unsigned int geni_status;
 	unsigned int geni_ios;
+	static unsigned int ios_log_limit;
 
 	if (!uart_console(uport) && !pm_runtime_active(uport->dev)) {
 		IPC_LOG_MSG(msm_port->ipc_log_misc,
@@ -921,9 +946,11 @@
 	return;
 check_flow_ctrl:
 	geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
-	if (!(geni_ios & IO2_DATA_IN))
+	if (++ios_log_limit % 5 == 0) {
 		IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: ios: 0x%08x\n",
-							__func__, geni_ios);
+						__func__, geni_ios);
+		ios_log_limit = 0;
+	}
 exit_start_tx:
 	if (!uart_console(uport))
 		msm_geni_serial_power_off(uport);
@@ -1058,9 +1085,10 @@
 	 * go through.
 	 */
 	mb();
-	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
 exit_start_rx_sequencer:
-	IPC_LOG_MSG(port->ipc_log_misc, "%s 0x%x\n", __func__, geni_status);
+	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+	IPC_LOG_MSG(port->ipc_log_misc, "%s: 0x%x, dma_dbg:0x%x\n", __func__,
+		geni_status, geni_read_reg(uport->membase, SE_DMA_DEBUG_REG0));
 }
 
 static void msm_geni_serial_start_rx(struct uart_port *uport)
@@ -1158,6 +1186,8 @@
 						      DMA_RX_BUF_SIZE);
 		port->rx_dma = (dma_addr_t)NULL;
 	}
+	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+	IPC_LOG_MSG(port->ipc_log_misc, "%s: 0x%x\n", __func__, geni_status);
 }
 
 static void msm_geni_serial_stop_rx(struct uart_port *uport)
@@ -1697,16 +1727,6 @@
 		}
 	}
 
-	if (unlikely(get_se_proto(uport->membase) != UART)) {
-		dev_err(uport->dev, "%s: Invalid FW %d loaded.\n",
-				 __func__, get_se_proto(uport->membase));
-		ret = -ENXIO;
-		goto exit_startup;
-	}
-	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: FW Ver:0x%x%x\n",
-		__func__,
-		get_se_m_fw(uport->membase), get_se_s_fw(uport->membase));
-
 	get_tx_fifo_size(msm_port);
 	if (!msm_port->port_setup) {
 		if (msm_geni_serial_port_setup(uport))
@@ -2032,6 +2052,23 @@
 static DEVICE_ATTR(xfer_mode, 0644, msm_geni_serial_xfer_mode_show,
 					msm_geni_serial_xfer_mode_store);
 
+static ssize_t ver_info_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
+	ssize_t ret = 0;
+	int len = (sizeof(struct msm_geni_serial_ver_info) * 2);
+
+	ret = scnprintf(buf, len, "FW ver=0x%x%x, HW ver=%d.%d.%d\n",
+		port->ver_info.m_fw_ver, port->ver_info.s_fw_ver,
+		port->ver_info.hw_major_ver, port->ver_info.hw_minor_ver,
+		port->ver_info.hw_step_ver);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(ver_info);
+
 #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 static int __init msm_geni_console_setup(struct console *co, char *options)
 {
@@ -2329,6 +2366,43 @@
 	{},
 };
 
+static int msm_geni_serial_get_ver_info(struct uart_port *uport)
+{
+	int hw_ver, ret = 0;
+	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+
+	se_geni_clks_on(&msm_port->serial_rsc);
+	/* Basic HW and FW info */
+	if (unlikely(get_se_proto(uport->membase) != UART)) {
+		dev_err(uport->dev, "%s: Invalid FW %d loaded.\n",
+			 __func__, get_se_proto(uport->membase));
+		ret = -ENXIO;
+		goto exit_ver_info;
+	}
+
+	msm_port->ver_info.m_fw_ver = get_se_m_fw(uport->membase);
+	msm_port->ver_info.s_fw_ver = get_se_s_fw(uport->membase);
+	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: FW Ver:0x%x%x\n",
+		__func__,
+		msm_port->ver_info.m_fw_ver, msm_port->ver_info.s_fw_ver);
+
+	hw_ver = geni_se_qupv3_hw_version(msm_port->wrapper_dev,
+		&msm_port->ver_info.hw_major_ver,
+		&msm_port->ver_info.hw_minor_ver,
+		&msm_port->ver_info.hw_step_ver);
+	if (hw_ver)
+		dev_err(uport->dev, "%s: Err getting HW version %d\n",
+						__func__, hw_ver);
+	else
+		IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: HW Ver:%x.%x.%x\n",
+			__func__, msm_port->ver_info.hw_major_ver,
+			msm_port->ver_info.hw_minor_ver,
+			msm_port->ver_info.hw_step_ver);
+exit_ver_info:
+	se_geni_clks_off(&msm_port->serial_rsc);
+	return ret;
+}
+
 static int msm_geni_serial_probe(struct platform_device *pdev)
 {
 	int ret = 0;
@@ -2540,8 +2614,12 @@
 				line, uport->fifosize, is_console);
 	device_create_file(uport->dev, &dev_attr_loopback);
 	device_create_file(uport->dev, &dev_attr_xfer_mode);
+	device_create_file(uport->dev, &dev_attr_ver_info);
 	msm_geni_serial_debug_init(uport, is_console);
 	dev_port->port_setup = false;
+	ret = msm_geni_serial_get_ver_info(uport);
+	if (ret)
+		goto exit_geni_serial_probe;
 	return uart_add_one_port(drv, uport);
 
 exit_geni_serial_probe:
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index ea6b62c..82451bb6 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1482,7 +1482,7 @@
 	ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
 	if (ret < 0) {
 		pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
-		return ret;
+		goto err_i2c;
 	}
 #endif
 
@@ -1490,10 +1490,18 @@
 	ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
 	if (ret < 0) {
 		pr_err("failed to init sc16is7xx spi --> %d\n", ret);
-		return ret;
+		goto err_spi;
 	}
 #endif
 	return ret;
+
+err_spi:
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+	i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+#endif
+err_i2c:
+	uart_unregister_driver(&sc16is7xx_uart);
+	return ret;
 }
 module_init(sc16is7xx_init);
 
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 6ff53b6..bcb9979 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -834,19 +834,9 @@
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
-	if (uart_circ_empty(xmit)) {
+	if (uart_circ_empty(xmit))
 		sci_stop_tx(port);
-	} else {
-		ctrl = serial_port_in(port, SCSCR);
 
-		if (port->type != PORT_SCI) {
-			serial_port_in(port, SCxSR); /* Dummy read */
-			sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
-		}
-
-		ctrl |= SCSCR_TIE;
-		serial_port_out(port, SCSCR, ctrl);
-	}
 }
 
 /* On SH3, SCIF may read end-of-break as a space->mark char */
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 699447a..747560f 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -36,7 +36,7 @@
 #define SPRD_FIFO_SIZE		128
 #define SPRD_DEF_RATE		26000000
 #define SPRD_BAUD_IO_LIMIT	3000000
-#define SPRD_TIMEOUT		256
+#define SPRD_TIMEOUT		256000
 
 /* the offset of serial registers and BITs for them */
 /* data registers */
@@ -63,6 +63,7 @@
 
 /* interrupt clear register */
 #define SPRD_ICLR		0x0014
+#define SPRD_ICLR_TIMEOUT	BIT(13)
 
 /* line control register */
 #define SPRD_LCR		0x0018
@@ -298,7 +299,8 @@
 		return IRQ_NONE;
 	}
 
-	serial_out(port, SPRD_ICLR, ~0);
+	if (ims & SPRD_IMSR_TIMEOUT)
+		serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT);
 
 	if (ims & (SPRD_IMSR_RX_FIFO_FULL |
 		SPRD_IMSR_BREAK_DETECT | SPRD_IMSR_TIMEOUT))
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 7333d64..eb61a07 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -362,7 +362,13 @@
 		cdns_uart_handle_tx(dev_id);
 		isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
 	}
-	if (isrstatus & CDNS_UART_IXR_RXMASK)
+
+	/*
+	 * Skip RX processing if RX is disabled: RXEMPTY will never be set,
+	 * since read bytes are not removed from the FIFO.
+	 */
+	if (isrstatus & CDNS_UART_IXR_RXMASK &&
+	    !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
 		cdns_uart_handle_rx(dev_id, isrstatus);
 
 	spin_unlock(&port->lock);
@@ -1255,7 +1261,7 @@
  *
  * Return: 0 on success, negative errno otherwise.
  */
-static int __init cdns_uart_console_setup(struct console *co, char *options)
+static int cdns_uart_console_setup(struct console *co, char *options)
 {
 	struct uart_port *port = &cdns_uart_port[co->index];
 	int baud = 9600;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 41b9a7c..ca9c82e 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -25,7 +25,7 @@
  * Byte threshold to limit memory consumption for flip buffers.
  * The actual memory limit is > 2x this amount.
  */
-#define TTYB_DEFAULT_MEM_LIMIT	65536
+#define TTYB_DEFAULT_MEM_LIMIT	(640 * 1024UL)
 
 /*
  * We default to dicing tty buffer allocations to this many characters
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 84f7b83..08e47de 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -520,6 +520,8 @@
 	tty_kref_put(tty);
 }
 
+extern void tty_sysctl_init(void);
+
 /**
  * proc_set_tty -  set the controlling terminal
  *
@@ -3709,6 +3711,7 @@
  */
 int __init tty_init(void)
 {
+	tty_sysctl_init();
 	cdev_init(&tty_cdev, &tty_fops);
 	if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
 	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 4ab518d..3eb3f2a 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -155,6 +155,13 @@
  *		takes tty_ldiscs_lock to guard against ldisc races
  */
 
+#if defined(CONFIG_LDISC_AUTOLOAD)
+	#define INITIAL_AUTOLOAD_STATE	1
+#else
+	#define INITIAL_AUTOLOAD_STATE	0
+#endif
+static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
+
 static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
 {
 	struct tty_ldisc *ld;
@@ -169,6 +176,8 @@
 	 */
 	ldops = get_ldops(disc);
 	if (IS_ERR(ldops)) {
+		if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
+			return ERR_PTR(-EPERM);
 		request_module("tty-ldisc-%d", disc);
 		ldops = get_ldops(disc);
 		if (IS_ERR(ldops))
@@ -774,3 +783,41 @@
 		tty_ldisc_put(tty->ldisc);
 	tty->ldisc = NULL;
 }
+
+static int zero;
+static int one = 1;
+static struct ctl_table tty_table[] = {
+	{
+		.procname	= "ldisc_autoload",
+		.data		= &tty_ldisc_autoload,
+		.maxlen		= sizeof(tty_ldisc_autoload),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{ }
+};
+
+static struct ctl_table tty_dir_table[] = {
+	{
+		.procname	= "tty",
+		.mode		= 0555,
+		.child		= tty_table,
+	},
+	{ }
+};
+
+static struct ctl_table tty_root_table[] = {
+	{
+		.procname	= "dev",
+		.mode		= 0555,
+		.child		= tty_dir_table,
+	},
+	{ }
+};
+
+void tty_sysctl_init(void)
+{
+	register_sysctl_table(tty_root_table);
+}
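
The nesting of the ctl_table entries above ("dev" -> "tty" -> "ldisc_autoload")
exposes the new knob at /proc/sys/dev/tty/ldisc_autoload. As a quick sanity
check, here is a minimal userspace sketch that reads the current value -- the
path follows from the tables above, but the program itself is illustrative and
not part of this patch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/dev/tty/ldisc_autoload", "r");
	int val;

	if (!f) {
		perror("ldisc_autoload");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("ldisc_autoload = %d\n", val);
	fclose(f);
	return 0;
}

Writing 0 to the same file (as root) makes tty_ldisc_get() fail with -EPERM
for callers without CAP_SYS_MODULE instead of auto-loading a line-discipline
module.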
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index ece10e6..e8a917a 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -121,6 +121,7 @@
 static struct input_handler kbd_handler;
 static DEFINE_SPINLOCK(kbd_event_lock);
 static DEFINE_SPINLOCK(led_lock);
+static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
 static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)];	/* keyboard key bitmap */
 static unsigned char shift_down[NR_SHIFT];		/* shift state counters.. */
 static bool dead_key_next;
@@ -1959,11 +1960,12 @@
 	char *p;
 	u_char *q;
 	u_char __user *up;
-	int sz;
+	int sz, fnw_sz;
 	int delta;
 	char *first_free, *fj, *fnw;
 	int i, j, k;
 	int ret;
+	unsigned long flags;
 
 	if (!capable(CAP_SYS_TTY_CONFIG))
 		perm = 0;
@@ -2006,7 +2008,14 @@
 			goto reterr;
 		}
 
+		fnw = NULL;
+		fnw_sz = 0;
+		/* race against other writers */
+		again:
+		spin_lock_irqsave(&func_buf_lock, flags);
 		q = func_table[i];
+
+		/* fj points to the next entry after 'q' */
 		first_free = funcbufptr + (funcbufsize - funcbufleft);
 		for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
 			;
@@ -2014,10 +2023,12 @@
 			fj = func_table[j];
 		else
 			fj = first_free;
-
+		/* buffer usage increase caused by the new entry */
 		delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
+
 		if (delta <= funcbufleft) { 	/* it fits in current buf */
 		    if (j < MAX_NR_FUNC) {
+			/* make enough space for new entry at 'fj' */
 			memmove(fj + delta, fj, first_free - fj);
 			for (k = j; k < MAX_NR_FUNC; k++)
 			    if (func_table[k])
@@ -2030,20 +2041,28 @@
 		    sz = 256;
 		    while (sz < funcbufsize - funcbufleft + delta)
 		      sz <<= 1;
-		    fnw = kmalloc(sz, GFP_KERNEL);
-		    if(!fnw) {
-		      ret = -ENOMEM;
-		      goto reterr;
+		    if (fnw_sz != sz) {
+		      spin_unlock_irqrestore(&func_buf_lock, flags);
+		      kfree(fnw);
+		      fnw = kmalloc(sz, GFP_KERNEL);
+		      fnw_sz = sz;
+		      if (!fnw) {
+			ret = -ENOMEM;
+			goto reterr;
+		      }
+		      goto again;
 		    }
 
 		    if (!q)
 		      func_table[i] = fj;
+		    /* copy data before insertion point to new location */
 		    if (fj > funcbufptr)
 			memmove(fnw, funcbufptr, fj - funcbufptr);
 		    for (k = 0; k < j; k++)
 		      if (func_table[k])
 			func_table[k] = fnw + (func_table[k] - funcbufptr);
 
+		    /* copy data after insertion point to new location */
 		    if (first_free > fj) {
 			memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj);
 			for (k = j; k < MAX_NR_FUNC; k++)
@@ -2056,7 +2075,9 @@
 		    funcbufleft = funcbufleft - delta + sz - funcbufsize;
 		    funcbufsize = sz;
 		}
+		/* finally, insert the item itself */
 		strcpy(func_table[i], kbs->kb_string);
+		spin_unlock_irqrestore(&func_buf_lock, flags);
 		break;
 	}
 	ret = 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 64c6af2c..e96e3a5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -901,8 +901,15 @@
 	} else if (ci->platdata->usb_phy) {
 		ci->usb_phy = ci->platdata->usb_phy;
 	} else {
+		ci->usb_phy = devm_usb_get_phy_by_phandle(dev->parent, "phys",
+							  0);
 		ci->phy = devm_phy_get(dev->parent, "usb-phy");
-		ci->usb_phy = devm_usb_get_phy(dev->parent, USB_PHY_TYPE_USB2);
+
+		/* Fallback to grabbing any registered USB2 PHY */
+		if (IS_ERR(ci->usb_phy) &&
+		    PTR_ERR(ci->usb_phy) != -EPROBE_DEFER)
+			ci->usb_phy = devm_usb_get_phy(dev->parent,
+						       USB_PHY_TYPE_USB2);
 
 		/* if both generic PHY and USB PHY layers aren't enabled */
 		if (PTR_ERR(ci->phy) == -ENOSYS &&
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 5ef8da6..64c7640 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -148,6 +148,8 @@
 
 	do {
 		controller = of_find_node_with_property(controller, "phys");
+		if (!of_device_is_available(controller))
+			continue;
 		index = 0;
 		do {
 			if (arg0 == -1) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index f794741..876679a 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -763,21 +763,18 @@
 		return;
 
 	if (dev->rawdescriptors) {
-		for (i = 0; i < dev->descriptor.bNumConfigurations &&
-				i < USB_MAXCONFIG; i++)
+		for (i = 0; i < dev->descriptor.bNumConfigurations; i++)
 			kfree(dev->rawdescriptors[i]);
 
 		kfree(dev->rawdescriptors);
 		dev->rawdescriptors = NULL;
 	}
 
-	for (c = 0; c < dev->descriptor.bNumConfigurations &&
-			c < USB_MAXCONFIG; c++) {
+	for (c = 0; c < dev->descriptor.bNumConfigurations; c++) {
 		struct usb_host_config *cf = &dev->config[c];
 
 		kfree(cf->string);
-		for (i = 0; i < cf->desc.bNumInterfaces &&
-				i < USB_MAXINTERFACES; i++) {
+		for (i = 0; i < cf->desc.bNumInterfaces; i++) {
 			if (cf->intf_cache[i])
 				kref_put(&cf->intf_cache[i]->ref,
 					  usb_release_interface_cache);
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 7dae981..8089e58 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -470,11 +470,6 @@
 		pm_runtime_disable(dev);
 	pm_runtime_set_suspended(dev);
 
-	/* Undo any residual pm_autopm_get_interface_* calls */
-	for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
-		usb_autopm_put_interface_no_suspend(intf);
-	atomic_set(&intf->pm_usage_cnt, 0);
-
 	if (!error)
 		usb_autosuspend_device(udev);
 
@@ -1638,7 +1633,6 @@
 	int			status;
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	status = pm_runtime_put_sync(&intf->dev);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1667,7 +1661,6 @@
 	int			status;
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	status = pm_runtime_put(&intf->dev);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1689,7 +1682,6 @@
 	struct usb_device	*udev = interface_to_usbdev(intf);
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	pm_runtime_put_noidle(&intf->dev);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
@@ -1720,8 +1712,6 @@
 	status = pm_runtime_get_sync(&intf->dev);
 	if (status < 0)
 		pm_runtime_put_sync(&intf->dev);
-	else
-		atomic_inc(&intf->pm_usage_cnt);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
 			status);
@@ -1755,8 +1745,6 @@
 	status = pm_runtime_get(&intf->dev);
 	if (status < 0 && status != -EINPROGRESS)
 		pm_runtime_put_noidle(&intf->dev);
-	else
-		atomic_inc(&intf->pm_usage_cnt);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
 			status);
@@ -1780,7 +1768,6 @@
 	struct usb_device	*udev = interface_to_usbdev(intf);
 
 	usb_mark_last_busy(udev);
-	atomic_inc(&intf->pm_usage_cnt);
 	pm_runtime_get_noresume(&intf->dev);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
@@ -1901,14 +1888,11 @@
 	return -EBUSY;
 }
 
-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
 {
 	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
 	int ret = -EPERM;
 
-	if (enable && !udev->usb2_hw_lpm_allowed)
-		return 0;
-
 	if (hcd->driver->set_usb2_hw_lpm) {
 		ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
 		if (!ret)
@@ -1918,6 +1902,24 @@
 	return ret;
 }
 
+int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+	if (!udev->usb2_hw_lpm_capable ||
+	    !udev->usb2_hw_lpm_allowed ||
+	    udev->usb2_hw_lpm_enabled)
+		return 0;
+
+	return usb_set_usb2_hardware_lpm(udev, 1);
+}
+
+int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
+{
+	if (!udev->usb2_hw_lpm_enabled)
+		return 0;
+
+	return usb_set_usb2_hardware_lpm(udev, 0);
+}
+
 #endif /* CONFIG_PM */
 
 struct bus_type usb_bus_type = {
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0ff6ae3..d6c3df6 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3179,8 +3179,7 @@
 	}
 
 	/* disable USB2 hardware LPM */
-	if (udev->usb2_hw_lpm_enabled == 1)
-		usb_set_usb2_hardware_lpm(udev, 0);
+	usb_disable_usb2_hardware_lpm(udev);
 
 	if (usb_disable_ltm(udev)) {
 		dev_err(&udev->dev, "Failed to disable LTM before suspend.\n");
@@ -3226,8 +3225,7 @@
 		usb_enable_ltm(udev);
  err_ltm:
 		/* Try to enable USB2 hardware LPM again */
-		if (udev->usb2_hw_lpm_capable == 1)
-			usb_set_usb2_hardware_lpm(udev, 1);
+		usb_enable_usb2_hardware_lpm(udev);
 
 		if (udev->do_remote_wakeup)
 			(void) usb_disable_remote_wakeup(udev);
@@ -3512,8 +3510,7 @@
 		hub_port_logical_disconnect(hub, port1);
 	} else  {
 		/* Try to enable USB2 hardware LPM */
-		if (udev->usb2_hw_lpm_capable == 1)
-			usb_set_usb2_hardware_lpm(udev, 1);
+		usb_enable_usb2_hardware_lpm(udev);
 
 		/* Try to enable USB3 LTM and LPM */
 		usb_enable_ltm(udev);
@@ -4350,7 +4347,7 @@
 	if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
 			connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
 		udev->usb2_hw_lpm_allowed = 1;
-		usb_set_usb2_hardware_lpm(udev, 1);
+		usb_enable_usb2_hardware_lpm(udev);
 	}
 }
 
@@ -5516,8 +5513,7 @@
 	/* Disable USB2 hardware LPM.
 	 * It will be re-enabled by the enumeration process.
 	 */
-	if (udev->usb2_hw_lpm_enabled == 1)
-		usb_set_usb2_hardware_lpm(udev, 0);
+	usb_disable_usb2_hardware_lpm(udev);
 
 	/* Disable LPM and LTM while we reset the device and reinstall the alt
 	 * settings.  Device-initiated LPM settings, and system exit latency
@@ -5627,7 +5623,7 @@
 
 done:
 	/* Now that the alt settings are re-installed, enable LTM and LPM. */
-	usb_set_usb2_hardware_lpm(udev, 1);
+	usb_enable_usb2_hardware_lpm(udev);
 	usb_unlocked_enable_lpm(udev);
 	usb_enable_ltm(udev);
 	usb_release_bos_descriptor(udev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 255fecc..2e5b330 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -817,9 +817,11 @@
 
 	if (dev->state == USB_STATE_SUSPENDED)
 		return -EHOSTUNREACH;
-	if (size <= 0 || !buf || !index)
+	if (size <= 0 || !buf)
 		return -EINVAL;
 	buf[0] = 0;
+	if (index <= 0 || index >= 256)
+		return -EINVAL;
 	tbuf = kmalloc(256, GFP_NOIO);
 	if (!tbuf)
 		return -ENOMEM;
@@ -1181,8 +1183,7 @@
 			dev->actconfig->interface[i] = NULL;
 		}
 
-		if (dev->usb2_hw_lpm_enabled == 1)
-			usb_set_usb2_hardware_lpm(dev, 0);
+		usb_disable_usb2_hardware_lpm(dev);
 		usb_unlocked_disable_lpm(dev);
 		usb_disable_ltm(dev);
 
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index c953a0f..1a232b4 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -494,7 +494,10 @@
 
 	if (!ret) {
 		udev->usb2_hw_lpm_allowed = value;
-		ret = usb_set_usb2_hardware_lpm(udev, value);
+		if (value)
+			ret = usb_enable_usb2_hardware_lpm(udev);
+		else
+			ret = usb_disable_usb2_hardware_lpm(udev);
 	}
 
 	usb_unlock_device(udev);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index fbff25f..dde0e99 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -84,7 +84,8 @@
 extern int usb_runtime_suspend(struct device *dev);
 extern int usb_runtime_resume(struct device *dev);
 extern int usb_runtime_idle(struct device *dev);
-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
+extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
+extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
 
 #else
 
@@ -104,7 +105,12 @@
 	return 0;
 }
 
-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+	return 0;
+}
+
+static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
 {
 	return 0;
 }
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 14c18f3..b392577 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -329,6 +329,7 @@
 			gi->composite.gadget_driver.udc_name = NULL;
 			goto err;
 		}
+		schedule_work(&gi->work);
 	}
 	mutex_unlock(&gi->lock);
 	return len;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
old mode 100644
new mode 100755
index 15c67c1..8bcc6ff
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1188,9 +1188,12 @@
 			 */
 			if (epfile->ep == ep) {
 				usb_ep_dequeue(ep->ep, req);
+				spin_unlock_irq(&epfile->ffs->eps_lock);
+				wait_for_completion(done);
 				interrupted = ep->status < 0;
+			} else {
+				spin_unlock_irq(&epfile->ffs->eps_lock);
 			}
-			spin_unlock_irq(&epfile->ffs->eps_lock);
 		}
 
 		if (interrupted) {
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 5815120..8e83649 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -340,20 +340,20 @@
 	req->complete = f_hidg_req_complete;
 	req->context  = hidg;
 
+	spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
 	status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
 	if (status < 0) {
 		ERROR(hidg->func.config->cdev,
 			"usb_ep_queue error on int endpoint %zd\n", status);
-		goto release_write_pending_unlocked;
+		goto release_write_pending;
 	} else {
 		status = count;
 	}
-	spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
 	return status;
 release_write_pending:
 	spin_lock_irqsave(&hidg->write_spinlock, flags);
-release_write_pending_unlocked:
 	hidg->write_pending = 0;
 	spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 40396a2..f57d293 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -958,6 +958,7 @@
 			break;
 	}
 	if (&req->req != _req) {
+		ep->stopped = stopped;
 		spin_unlock_irqrestore(&ep->dev->lock, flags);
 		return -EINVAL;
 	}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 7a8c366..dfaed8e 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -870,9 +870,6 @@
 	(void) readl(&ep->dev->pci->pcimstctl);
 
 	writel(BIT(DMA_START), &dma->dmastat);
-
-	if (!ep->is_in)
-		stop_out_naking(ep);
 }
 
 static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -911,6 +908,7 @@
 			writel(BIT(DMA_START), &dma->dmastat);
 			return;
 		}
+		stop_out_naking(ep);
 	}
 
 	tmp = dmactl_default;
@@ -1279,9 +1277,9 @@
 			break;
 	}
 	if (&req->req != _req) {
+		ep->stopped = stopped;
 		spin_unlock_irqrestore(&ep->dev->lock, flags);
-		dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
-								__func__);
+		ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
 		return -EINVAL;
 	}
 
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index b09392f..37acb0e 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -779,12 +779,12 @@
 	atomic_inc(&urb->use_count);
 	atomic_inc(&urb->dev->urbnum);
 	urb->setup_dma = dma_map_single(
-			hcd->self.controller,
+			hcd->self.sysdev,
 			urb->setup_packet,
 			sizeof(struct usb_ctrlrequest),
 			DMA_TO_DEVICE);
 	urb->transfer_dma = dma_map_single(
-			hcd->self.controller,
+			hcd->self.sysdev,
 			urb->transfer_buffer,
 			urb->transfer_buffer_length,
 			DMA_FROM_DEVICE);
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 4361897..3efb7b0 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3208,6 +3208,9 @@
 	printk(KERN_INFO "driver %s\n", hcd_name);
 	workqueue = create_singlethread_workqueue("u132");
 	retval = platform_driver_register(&u132_platform_driver);
+	if (retval)
+		destroy_workqueue(workqueue);
+
 	return retval;
 }
 
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 0e4535e..64ee815 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -192,5 +192,6 @@
 			xhci_rcar_is_gen3(hcd->self.controller))
 		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
 
+	xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	return xhci_rcar_download_firmware(hcd);
 }
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 32cb995..fe5db15 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1635,10 +1635,13 @@
 		}
 	}
 
-	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
-			DEV_SUPERSPEED_ANY(temp)) {
+	if ((temp & PORT_PLC) &&
+	    DEV_SUPERSPEED_ANY(temp) &&
+	    ((temp & PORT_PLS_MASK) == XDEV_U0 ||
+	     (temp & PORT_PLS_MASK) == XDEV_U1 ||
+	     (temp & PORT_PLS_MASK) == XDEV_U2)) {
 		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
-		/* We've just brought the device into U0 through either the
+		/* We've just brought the device into U0/1/2 through either the
 		 * Resume state after a device remote wakeup, or through the
 		 * U3Exit state after a host-initiated resume.  If it's a device
 		 * initiated remote wake, don't pass up the link state change,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 93043ed..016f7a1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -311,6 +311,7 @@
  */
 #define PORT_PLS_MASK	(0xf << 5)
 #define XDEV_U0		(0x0 << 5)
+#define XDEV_U1		(0x1 << 5)
 #define XDEV_U2		(0x2 << 5)
 #define XDEV_U3		(0x3 << 5)
 #define XDEV_INACTIVE	(0x6 << 5)
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 1e67234..efa3c86 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -324,6 +324,7 @@
 	usb_deregister_dev(interface, &yurex_class);
 
 	/* prevent more I/O from starting */
+	usb_poison_urb(dev->urb);
 	mutex_lock(&dev->io_mutex);
 	dev->interface = NULL;
 	mutex_unlock(&dev->io_mutex);
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 1e672a5..3811792 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -249,7 +249,7 @@
 #define PS_HARD_RESET_TIME	25
 #define PS_SOURCE_ON		400
 #define PS_SOURCE_OFF		750
-#define FIRST_SOURCE_CAP_TIME	200
+#define FIRST_SOURCE_CAP_TIME	100
 #define VDM_BUSY_TIME		50
 #define VCONN_ON_TIME		100
 
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 41713e5..109f5da 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -77,6 +77,7 @@
 	{ USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
 	{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
 	{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
+	{ USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
 	{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
 	{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
 	{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 972f5a5..1f20fa0 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -560,9 +560,12 @@
 
 static void f81232_close(struct usb_serial_port *port)
 {
+	struct f81232_private *port_priv = usb_get_serial_port_data(port);
+
 	f81232_port_disable(port);
 	usb_serial_generic_close(port);
 	usb_kill_urb(port->interrupt_in_urb);
+	flush_work(&port_priv->interrupt_work);
 }
 
 static void f81232_dtr_rts(struct usb_serial_port *port, int on)
@@ -656,6 +659,40 @@
 	return 0;
 }
 
+static int f81232_suspend(struct usb_serial *serial, pm_message_t message)
+{
+	struct usb_serial_port *port = serial->port[0];
+	struct f81232_private *port_priv = usb_get_serial_port_data(port);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+		usb_kill_urb(port->read_urbs[i]);
+
+	usb_kill_urb(port->interrupt_in_urb);
+
+	if (port_priv)
+		flush_work(&port_priv->interrupt_work);
+
+	return 0;
+}
+
+static int f81232_resume(struct usb_serial *serial)
+{
+	struct usb_serial_port *port = serial->port[0];
+	int result;
+
+	if (tty_port_initialized(&port->port)) {
+		result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+		if (result) {
+			dev_err(&port->dev, "submit interrupt urb failed: %d\n",
+					result);
+			return result;
+		}
+	}
+
+	return usb_serial_generic_resume(serial);
+}
+
 static struct usb_serial_driver f81232_device = {
 	.driver = {
 		.owner =	THIS_MODULE,
@@ -679,6 +716,8 @@
 	.read_int_callback =	f81232_read_int_callback,
 	.port_probe =		f81232_port_probe,
 	.port_remove =		f81232_port_remove,
+	.suspend =		f81232_suspend,
+	.resume =		f81232_resume,
 };
 
 static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b88a722..f54931a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -604,6 +604,8 @@
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index ddf5ab9..15d220e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -566,7 +566,9 @@
 /*
  * NovaTech product ids (FTDI_VID)
  */
-#define FTDI_NT_ORIONLXM_PID	0x7c90	/* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLXM_PID		0x7c90	/* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLX_PLUS_PID	0x7c91	/* OrionLX+ Substation Automation Platform */
+#define FTDI_NT_ORION_IO_PID		0x7c92	/* Orion I/O */
 
 /*
  * Synapse Wireless product ids (FTDI_VID)
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 944de65..f1e74f5 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -350,6 +350,7 @@
 	struct usb_serial_port *port = urb->context;
 	unsigned char *data = urb->transfer_buffer;
 	unsigned long flags;
+	bool stopped = false;
 	int status = urb->status;
 	int i;
 
@@ -357,33 +358,51 @@
 		if (urb == port->read_urbs[i])
 			break;
 	}
-	set_bit(i, &port->read_urbs_free);
 
 	dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i,
 							urb->actual_length);
 	switch (status) {
 	case 0:
+		usb_serial_debug_data(&port->dev, __func__, urb->actual_length,
+							data);
+		port->serial->type->process_read_urb(urb);
 		break;
 	case -ENOENT:
 	case -ECONNRESET:
 	case -ESHUTDOWN:
 		dev_dbg(&port->dev, "%s - urb stopped: %d\n",
 							__func__, status);
-		return;
+		stopped = true;
+		break;
 	case -EPIPE:
 		dev_err(&port->dev, "%s - urb stopped: %d\n",
 							__func__, status);
-		return;
+		stopped = true;
+		break;
 	default:
 		dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
 							__func__, status);
-		goto resubmit;
+		break;
 	}
 
-	usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
-	port->serial->type->process_read_urb(urb);
+	/*
+	 * Make sure URB processing is done before marking as free to avoid
+	 * racing with unthrottle() on another CPU. Matches the barriers
+	 * implied by the test_and_clear_bit() in
+	 * usb_serial_generic_submit_read_urb().
+	 */
+	smp_mb__before_atomic();
+	set_bit(i, &port->read_urbs_free);
+	/*
+	 * Make sure URB is marked as free before checking the throttled flag
+	 * to avoid racing with unthrottle() on another CPU. Matches the
+	 * smp_mb() in unthrottle().
+	 */
+	smp_mb__after_atomic();
 
-resubmit:
+	if (stopped)
+		return;
+
 	/* Throttle the device if requested by tty */
 	spin_lock_irqsave(&port->lock, flags);
 	port->throttled = port->throttle_req;
@@ -458,6 +477,12 @@
 	port->throttled = port->throttle_req = 0;
 	spin_unlock_irq(&port->lock);
 
+	/*
+	 * Matches the smp_mb__after_atomic() in
+	 * usb_serial_generic_read_bulk_callback().
+	 */
+	smp_mb();
+
 	if (was_throttled)
 		usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
 }
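
The barrier pairing above is the classic store/load ("Dekker-style") pattern:
each side publishes its own flag with a full barrier before reading the other
side's flag, so at least one of the two paths is guaranteed to observe the
other and resubmit the read URBs. A rough userspace analogy using C11 seq_cst
atomics (illustrative only; the kernel primitives differ in detail):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int urb_free = 0;		/* stands in for read_urbs_free */
static atomic_int throttled = 1;	/* stands in for port->throttled */

static void callback_side(void)		/* read_bulk_callback() */
{
	atomic_store(&urb_free, 1);	/* set_bit + smp_mb__*_atomic */
	if (!atomic_load(&throttled))	/* throttle re-check */
		printf("callback resubmits\n");
}

static void unthrottle_side(void)	/* unthrottle() */
{
	atomic_store(&throttled, 0);	/* clear flag, then smp_mb() */
	if (atomic_load(&urb_free))	/* free-slot re-check */
		printf("unthrottle resubmits\n");
}

int main(void)
{
	/* Run these from two threads to exercise the race. */
	callback_side();
	unthrottle_side();
	return 0;
}

With sequentially consistent ordering it is impossible for both loads to miss
both stores, which is exactly the guarantee the smp_mb() pairing provides.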
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 135eb04..ea20322 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -368,8 +368,6 @@
 	if (!urbtrack)
 		return -ENOMEM;
 
-	kref_get(&mos_parport->ref_count);
-	urbtrack->mos_parport = mos_parport;
 	urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
 	if (!urbtrack->urb) {
 		kfree(urbtrack);
@@ -390,6 +388,8 @@
 			     usb_sndctrlpipe(usbdev, 0),
 			     (unsigned char *)urbtrack->setup,
 			     NULL, 0, async_complete, urbtrack);
+	kref_get(&mos_parport->ref_count);
+	urbtrack->mos_parport = mos_parport;
 	kref_init(&urbtrack->ref_count);
 	INIT_LIST_HEAD(&urbtrack->urblist_entry);
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index b2b7c12..9f96dd2 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1066,7 +1066,8 @@
 	  .driver_info = RSVD(3) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
-	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
+	  .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
 	/* Quectel products using Qualcomm vendor ID */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1941,10 +1942,12 @@
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
 	  .driver_info = RSVD(4) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
-	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },	/* D-Link DWM-152/C1 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },	/* D-Link DWM-156/C1 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },	/* D-Link DWM-156/A3 */
+	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),			/* Olicard 600 */
+	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },			/* OLICARD300 - MT6225 */
 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 4176d1a..fac3447 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -776,18 +776,16 @@
 		break;
 	case RTS51X_STAT_IDLE:
 	case RTS51X_STAT_SS:
-		usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n",
-			     atomic_read(&us->pusb_intf->pm_usage_cnt),
+		usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
 			     atomic_read(&us->pusb_intf->dev.power.usage_count));
 
-		if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
+		if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
 			usb_stor_dbg(us, "Ready to enter SS state\n");
 			rts51x_set_stat(chip, RTS51X_STAT_SS);
 			/* ignore mass storage interface's children */
 			pm_suspend_ignore_children(&us->pusb_intf->dev, true);
 			usb_autopm_put_interface_async(us->pusb_intf);
-			usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n",
-				     atomic_read(&us->pusb_intf->pm_usage_cnt),
+			usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
 				     atomic_read(&us->pusb_intf->dev.power.usage_count));
 		}
 		break;
@@ -820,11 +818,10 @@
 	int ret;
 
 	if (working_scsi(srb)) {
-		usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
-			     atomic_read(&us->pusb_intf->pm_usage_cnt),
+		usb_stor_dbg(us, "working scsi, power.usage:%d\n",
 			     atomic_read(&us->pusb_intf->dev.power.usage_count));
 
-		if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
+		if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
 			ret = usb_autopm_get_interface(us->pusb_intf);
 			usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
 		}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 13f2c05..afb4b0b 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -81,6 +81,7 @@
 static int slave_alloc (struct scsi_device *sdev)
 {
 	struct us_data *us = host_to_us(sdev->host);
+	int maxp;
 
 	/*
 	 * Set the INQUIRY transfer length to 36.  We don't use any of
@@ -90,20 +91,17 @@
 	sdev->inquiry_len = 36;
 
 	/*
-	 * USB has unusual DMA-alignment requirements: Although the
-	 * starting address of each scatter-gather element doesn't matter,
-	 * the length of each element except the last must be divisible
-	 * by the Bulk maxpacket value.  There's currently no way to
-	 * express this by block-layer constraints, so we'll cop out
-	 * and simply require addresses to be aligned at 512-byte
-	 * boundaries.  This is okay since most block I/O involves
-	 * hardware sectors that are multiples of 512 bytes in length,
-	 * and since host controllers up through USB 2.0 have maxpacket
-	 * values no larger than 512.
-	 *
-	 * But it doesn't suffice for Wireless USB, where Bulk maxpacket
-	 * values can be as large as 2048.  To make that work properly
-	 * will require changes to the block layer.
+	 * USB has unusual scatter-gather requirements: the length of each
+	 * scatterlist element except the last must be divisible by the
+	 * Bulk maxpacket value.  Fortunately this value is always a
+	 * power of 2.  Inform the block layer about this requirement.
+	 */
+	maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
+	blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
+
+	/*
+	 * Some host controllers may have alignment requirements.
+	 * We'll play it safe by requiring 512-byte alignment always.
 	 */
 	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
 
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 64af889..97621e5 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -796,24 +796,33 @@
 {
 	struct uas_dev_info *devinfo =
 		(struct uas_dev_info *)sdev->host->hostdata;
+	int maxp;
 
 	sdev->hostdata = devinfo;
 
 	/*
-	 * USB has unusual DMA-alignment requirements: Although the
-	 * starting address of each scatter-gather element doesn't matter,
-	 * the length of each element except the last must be divisible
-	 * by the Bulk maxpacket value.  There's currently no way to
-	 * express this by block-layer constraints, so we'll cop out
-	 * and simply require addresses to be aligned at 512-byte
-	 * boundaries.  This is okay since most block I/O involves
-	 * hardware sectors that are multiples of 512 bytes in length,
-	 * and since host controllers up through USB 2.0 have maxpacket
-	 * values no larger than 512.
+	 * We have two requirements here: we must satisfy the requirements
+	 * of the physical HC and the demands of the protocol, and we
+	 * definitely want no additional memory allocation in this path,
+	 * which rules out using bounce buffers.
 	 *
-	 * But it doesn't suffice for Wireless USB, where Bulk maxpacket
-	 * values can be as large as 2048.  To make that work properly
-	 * will require changes to the block layer.
+	 * For a transmission on USB to continue, we must never send
+	 * a packet that is smaller than maxpacket. Hence the length of each
+	 * scatterlist element except the last must be divisible by the
+	 * Bulk maxpacket value.
+	 * If the HC does not ensure that through SG, the upper layer
+	 * must do so. We must assume nothing about the capabilities
+	 * of the HC, so we use the most pessimistic requirement.
+	 */
+
+	maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
+	blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
+
+	/*
+	 * The protocol has no requirements on alignment in the strict sense.
+	 * Controllers may or may not have alignment restrictions.
+	 * As this is not exported, we use an extremely conservative guess.
 	 */
 	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
 
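
Both hunks above hand the per-element constraint to the block layer via
blk_queue_virt_boundary(), which takes a mask; since the Bulk maxpacket value
is always a power of two, the mask is simply maxp - 1. A tiny standalone
illustration (512 and 1024 are the usual high-speed and SuperSpeed bulk
maxpacket sizes):

#include <stdio.h>

int main(void)
{
	unsigned int maxp[] = { 512, 1024 };	/* HS and SS bulk maxpacket */
	int i;

	for (i = 0; i < 2; i++)
		printf("maxp %4u -> virt boundary mask 0x%03x\n",
		       maxp[i], maxp[i] - 1);
	return 0;
}

The mask tells the block layer that every scatterlist element except the last
must end on a maxpacket boundary, which is the property both comments
describe.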
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 5b80718..777a405 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -383,16 +383,10 @@
 	}
 
 	if (usb_endpoint_xfer_isoc(epd)) {
-		/* validate packet size and number of packets */
-		unsigned int maxp, packets, bytes;
-
-		maxp = usb_endpoint_maxp(epd);
-		maxp *= usb_endpoint_maxp_mult(epd);
-		bytes = pdu->u.cmd_submit.transfer_buffer_length;
-		packets = DIV_ROUND_UP(bytes, maxp);
-
+		/* validate number of packets */
 		if (pdu->u.cmd_submit.number_of_packets < 0 ||
-		    pdu->u.cmd_submit.number_of_packets > packets) {
+		    pdu->u.cmd_submit.number_of_packets >
+		    USBIP_MAX_ISO_PACKETS) {
 			dev_err(&sdev->udev->dev,
 				"CMD_SUBMIT: isoc invalid num packets %d\n",
 				pdu->u.cmd_submit.number_of_packets);
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 109e65b..0b199a2 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -136,6 +136,13 @@
 #define USBIP_DIR_OUT	0x00
 #define USBIP_DIR_IN	0x01
 
+/*
+ * Arbitrary limit for the maximum number of isochronous packets in an URB,
+ * compare for example the uhci_submit_isochronous function in
+ * drivers/usb/host/uhci-q.c
+ */
+#define USBIP_MAX_ISO_PACKETS 1024
+
 /**
  * struct usbip_header_basic - data pertinent to every request
  * @command: the usbip request type
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 7338e43..f9a75df 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1467,11 +1467,11 @@
 		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
 				   subvendor, subdevice, class, class_mask, 0);
 		if (rc)
-			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
 				vendor, device, subvendor, subdevice,
 				class, class_mask, rc);
 		else
-			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
 				vendor, device, subvendor, subdevice,
 				class, class_mask);
 	}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 1d48e62..0d5b667 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -53,10 +53,16 @@
 MODULE_PARM_DESC(disable_hugepages,
 		 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+		 "Maximum number of user DMA mappings per container (default: 65535).");
+
 struct vfio_iommu {
 	struct list_head	domain_list;
 	struct mutex		lock;
 	struct rb_root		dma_list;
+	unsigned int		dma_avail;
 	bool			v2;
 	bool			nesting;
 };
@@ -384,6 +390,7 @@
 	vfio_unmap_unpin(iommu, dma);
 	vfio_unlink_dma(iommu, dma);
 	kfree(dma);
+	iommu->dma_avail++;
 }
 
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -584,12 +591,18 @@
 		return -EEXIST;
 	}
 
+	if (!iommu->dma_avail) {
+		mutex_unlock(&iommu->lock);
+		return -ENOSPC;
+	}
+
 	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
 	if (!dma) {
 		mutex_unlock(&iommu->lock);
 		return -ENOMEM;
 	}
 
+	iommu->dma_avail--;
 	dma->iova = iova;
 	dma->vaddr = vaddr;
 	dma->prot = prot;
@@ -905,6 +918,7 @@
 
 	INIT_LIST_HEAD(&iommu->domain_list);
 	iommu->dma_list = RB_ROOT;
+	iommu->dma_avail = dma_entry_limit;
 	mutex_init(&iommu->lock);
 
 	return iommu;
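
The accounting added above is a plain budget: dma_avail starts at
dma_entry_limit (default U16_MAX, i.e. 65535, and adjustable through the new
module parameter), is decremented per mapping, replenished on unmap, and a
MAP_DMA request fails with -ENOSPC once it is exhausted. A minimal sketch of
the same pattern, standalone and with the iommu->lock elided:

#include <stdio.h>

#define DMA_ENTRY_LIMIT 65535U		/* default: U16_MAX */

static unsigned int dma_avail = DMA_ENTRY_LIMIT;

static int map_dma(void)
{
	if (!dma_avail)
		return -1;		/* -ENOSPC in the driver */
	dma_avail--;			/* one more tracked mapping */
	return 0;
}

static void unmap_dma(void)
{
	dma_avail++;			/* budget returned on unmap */
}

int main(void)
{
	if (map_dma() == 0)
		unmap_dma();
	printf("dma_avail = %u\n", dma_avail);
	return 0;
}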
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 2383caf..a54dbfe 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -863,8 +863,12 @@
 				u64 start, u64 size, u64 end,
 				u64 userspace_addr, int perm)
 {
-	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+	struct vhost_umem_node *tmp, *node;
 
+	if (!size)
+		return -EFAULT;
+
+	node = kmalloc(sizeof(*node), GFP_ATOMIC);
 	if (!node)
 		return -ENOMEM;
 
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 72e914d..3cefd60 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -640,7 +640,7 @@
 		hash_del_rcu(&vsock->hash);
 
 	vsock->guest_cid = guest_cid;
-	hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
 	spin_unlock_bh(&vhost_vsock_lock);
 
 	return 0;
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index d95ae09..baa1510 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -54,10 +54,11 @@
 	if (err < 0)
 		dev_err(pb->dev, "failed to enable power supply\n");
 
+	pwm_enable(pb->pwm);
+
 	if (pb->enable_gpio)
 		gpiod_set_value_cansleep(pb->enable_gpio, 1);
 
-	pwm_enable(pb->pwm);
 	pb->enabled = true;
 }
 
@@ -66,12 +67,12 @@
 	if (!pb->enabled)
 		return;
 
-	pwm_config(pb->pwm, 0, pb->period);
-	pwm_disable(pb->pwm);
-
 	if (pb->enable_gpio)
 		gpiod_set_value_cansleep(pb->enable_gpio, 0);
 
+	pwm_config(pb->pwm, 0, pb->period);
+	pwm_disable(pb->pwm);
+
 	regulator_disable(pb->power_supply);
 	pb->enabled = false;
 }
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 0dcdca1..780c22f 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -425,6 +425,9 @@
 {
 	unsigned int x;
 
+	if (image->width > info->var.xres || image->height > info->var.yres)
+		return;
+
 	if (rotate == FB_ROTATE_UR) {
 		for (x = 0;
 		     x < num && image->dx + image->width <= info->var.xres;
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 5d8398e..5a477ba 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1015,7 +1015,8 @@
 static int mdss_dsi_cmd_flush(struct file *file, fl_owner_t id)
 {
 	struct buf_data *pcmds = file->private_data;
-	int blen, len, i;
+	unsigned int len;
+	int blen, i;
 	char *buf, *bufp, *bp;
 	struct dsi_ctrl_hdr *dchdr;
 
@@ -1059,7 +1060,7 @@
 	while (len >= sizeof(*dchdr)) {
 		dchdr = (struct dsi_ctrl_hdr *)bp;
 		dchdr->dlen = ntohs(dchdr->dlen);
-		if (dchdr->dlen > len || dchdr->dlen < 0) {
+		if (dchdr->dlen > (len - sizeof(*dchdr)) || dchdr->dlen < 0) {
 			pr_err("%s: dtsi cmd=%x error, len=%d\n",
 				__func__, dchdr->dtype, dchdr->dlen);
 			kfree(buf);
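
The tightened bound accounts for the header itself: with len bytes left, a record advertising dlen payload bytes only fits when dlen <= len - sizeof(header); comparing dlen against len alone lets a crafted table push the cursor past the end of the buffer on the next iteration. A generic sketch of the pattern for a hypothetical length-prefixed record format (struct rec_hdr, walk_records()):

	struct rec_hdr {
		u8	type;
		__be16	dlen;	/* payload bytes following the header */
	} __packed;

	static int walk_records(const u8 *buf, size_t len)
	{
		while (len >= sizeof(struct rec_hdr)) {
			const struct rec_hdr *h = (const void *)buf;
			u16 dlen = be16_to_cpu(h->dlen);

			/* payload must fit in what remains after the header */
			if (dlen > len - sizeof(*h))
				return -EINVAL;

			buf += sizeof(*h) + dlen;
			len -= sizeof(*h) + dlen;
		}
		return 0;
	}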
diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h
index aad1cc4..c7ebf03 100644
--- a/drivers/video/fbdev/sm712.h
+++ b/drivers/video/fbdev/sm712.h
@@ -15,14 +15,10 @@
 
 #define FB_ACCEL_SMI_LYNX 88
 
-#define SCREEN_X_RES      1024
-#define SCREEN_Y_RES      600
-#define SCREEN_BPP        16
-
-/*Assume SM712 graphics chip has 4MB VRAM */
-#define SM712_VIDEOMEMORYSIZE	  0x00400000
-/*Assume SM722 graphics chip has 8MB VRAM */
-#define SM722_VIDEOMEMORYSIZE	  0x00800000
+#define SCREEN_X_RES          1024
+#define SCREEN_Y_RES_PC       768
+#define SCREEN_Y_RES_NETBOOK  600
+#define SCREEN_BPP            16
 
 #define dac_reg	(0x3c8)
 #define dac_val	(0x3c9)
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 73cb4ff..0d92ff3 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -530,6 +530,65 @@
 			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
 		},
 	},
+	{	/*  1024 x 768  16Bpp  60Hz */
+		1024, 768, 16, 60,
+		/*  Init_MISC */
+		0xEB,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x03, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x30, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+			0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+			0x0F, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+			0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+			0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+			0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+		},
+	},
 	{	/*  mode#5: 1024 x 768  24Bpp  60Hz */
 		1024, 768, 24, 60,
 		/*  Init_MISC */
@@ -827,67 +886,80 @@
 
 static int smtc_blank(int blank_mode, struct fb_info *info)
 {
+	struct smtcfb_info *sfb = info->par;
+
 	/* clear DPMS setting */
 	switch (blank_mode) {
 	case FB_BLANK_UNBLANK:
 		/* Screen On: HSync: On, VSync : On */
+
+		switch (sfb->chip_id) {
+		case 0x710:
+		case 0x712:
+			smtc_seqw(0x6a, 0x16);
+			smtc_seqw(0x6b, 0x02);
+			break;
+		case 0x720:
+			smtc_seqw(0x6a, 0x0d);
+			smtc_seqw(0x6b, 0x02);
+			break;
+		}
+
+		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
 		smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
-		smtc_seqw(0x6a, 0x16);
-		smtc_seqw(0x6b, 0x02);
 		smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
 		smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
-		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
-		smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
 		smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
+		smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
 		break;
 	case FB_BLANK_NORMAL:
 		/* Screen Off: HSync: On, VSync : On   Soft blank */
-		smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
-		smtc_seqw(0x6a, 0x16);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
-		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
 		smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
 		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+		smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+		smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+		smtc_seqw(0x6a, 0x16);
+		smtc_seqw(0x6b, 0x02);
 		break;
 	case FB_BLANK_VSYNC_SUSPEND:
 		/* Screen Off: HSync: On, VSync : Off */
-		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
-		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
-		smtc_seqw(0x6a, 0x0c);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
-		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
-		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
 		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
 		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
+		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
 		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+		smtc_seqw(0x6a, 0x0c);
+		smtc_seqw(0x6b, 0x02);
 		break;
 	case FB_BLANK_HSYNC_SUSPEND:
 		/* Screen Off: HSync: Off, VSync : On */
-		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
-		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
-		smtc_seqw(0x6a, 0x0c);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
-		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
-		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
 		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
 		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
 		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+		smtc_seqw(0x6a, 0x0c);
+		smtc_seqw(0x6b, 0x02);
 		break;
 	case FB_BLANK_POWERDOWN:
 		/* Screen Off: HSync: Off, VSync : Off */
-		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
-		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
-		smtc_seqw(0x6a, 0x0c);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
-		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
-		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
 		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
 		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
 		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+		smtc_seqw(0x6a, 0x0c);
+		smtc_seqw(0x6b, 0x02);
 		break;
 	default:
 		return -EINVAL;
@@ -1144,8 +1216,10 @@
 
 		/* init SEQ register SR30 - SR75 */
 		for (i = 0; i < SIZE_SR30_SR75; i++)
-			if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
-			    (i + 0x30) != 0x6b)
+			if ((i + 0x30) != 0x30 && (i + 0x30) != 0x62 &&
+			    (i + 0x30) != 0x6a && (i + 0x30) != 0x6b &&
+			    (i + 0x30) != 0x70 && (i + 0x30) != 0x71 &&
+			    (i + 0x30) != 0x74 && (i + 0x30) != 0x75)
 				smtc_seqw(i + 0x30,
 					  vgamode[j].init_sr30_sr75[i]);
 
@@ -1170,8 +1244,12 @@
 			smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
 
 		/* init CRTC register CR30 - CR4D */
-		for (i = 0; i < SIZE_CR30_CR4D; i++)
+		for (i = 0; i < SIZE_CR30_CR4D; i++) {
+			if ((i + 0x30) >= 0x3B && (i + 0x30) <= 0x3F)
+				/* writing CR3B-CR3F has side effects, skip them */
+				continue;
 			smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
+		}
 
 		/* init CRTC register CR90 - CRA7 */
 		for (i = 0; i < SIZE_CR90_CRA7; i++)
@@ -1322,6 +1400,11 @@
 {
 	sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
 
+	if (sfb->chip_id == 0x720)
+		/* on SM720, the framebuffer starts at the 2 MB offset */
+		sfb->fb->fix.smem_start += 0x00200000;
+
+	/* XXX: is it safe for SM720 on Big-Endian? */
 	if (sfb->fb->var.bits_per_pixel == 32)
 		sfb->fb->fix.smem_start += big_addr;
 
@@ -1359,12 +1442,82 @@
 	outb_p(0x11, 0x3c5);
 }
 
+static u_long sm7xx_vram_probe(struct smtcfb_info *sfb)
+{
+	u8 vram;
+
+	switch (sfb->chip_id) {
+	case 0x710:
+	case 0x712:
+		/*
+		 * Assume SM712 graphics chip has 4MB VRAM.
+		 *
+		 * FIXME: SM712 can have 2MB VRAM, which is used on earlier
+		 * laptops, such as IBM Thinkpad 240X. This driver would
+		 * probably crash on those machines. If anyone gets one of
+		 * those and is willing to help, run "git blame" and send me
+		 * an E-mail.
+		 */
+		return 0x00400000;
+	case 0x720:
+		outb_p(0x76, 0x3c4);
+		vram = inb_p(0x3c5) >> 6;
+
+		if (vram == 0x00)
+			return 0x00800000;  /* 8 MB */
+		else if (vram == 0x01)
+			return 0x01000000;  /* 16 MB */
+		else if (vram == 0x02)
+			return 0x00400000;  /* illegal, fallback to 4 MB */
+		else if (vram == 0x03)
+			return 0x00400000;  /* 4 MB */
+	}
+	return 0;  /* unknown hardware */
+}
+
+static void sm7xx_resolution_probe(struct smtcfb_info *sfb)
+{
+	/* get mode parameter from smtc_scr_info */
+	if (smtc_scr_info.lfb_width != 0) {
+		sfb->fb->var.xres = smtc_scr_info.lfb_width;
+		sfb->fb->var.yres = smtc_scr_info.lfb_height;
+		sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
+		goto final;
+	}
+
+	/*
+	 * No parameter, default resolution is 1024x768-16.
+	 *
+	 * FIXME: earlier laptops, such as the IBM Thinkpad 240X, have an
+	 * 800x600 panel; also see the comments about the Thinkpad 240X above.
+	 */
+	sfb->fb->var.xres = SCREEN_X_RES;
+	sfb->fb->var.yres = SCREEN_Y_RES_PC;
+	sfb->fb->var.bits_per_pixel = SCREEN_BPP;
+
+#ifdef CONFIG_MIPS
+	/*
+	 * Loongson MIPS netbooks use 1024x600 LCD panels and are the original
+	 * target platform of this driver, but nearly all old x86 laptops have
+	 * 1024x768. Driving 768-line panels with 600-line timings would partially
+	 * garble the display, so we don't want that. But it's not possible to
+	 * distinguish them reliably.
+	 *
+	 * So we change the default to 768, but keep 600 as-is on MIPS.
+	 */
+	sfb->fb->var.yres = SCREEN_Y_RES_NETBOOK;
+#endif
+
+final:
+	big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
+}
+
 static int smtcfb_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
 	struct smtcfb_info *sfb;
 	struct fb_info *info;
-	u_long smem_size = 0x00800000;	/* default 8MB */
+	u_long smem_size;
 	int err;
 	unsigned long mmio_base;
 
@@ -1404,29 +1557,19 @@
 
 	sm7xx_init_hw();
 
-	/* get mode parameter from smtc_scr_info */
-	if (smtc_scr_info.lfb_width != 0) {
-		sfb->fb->var.xres = smtc_scr_info.lfb_width;
-		sfb->fb->var.yres = smtc_scr_info.lfb_height;
-		sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
-	} else {
-		/* default resolution 1024x600 16bit mode */
-		sfb->fb->var.xres = SCREEN_X_RES;
-		sfb->fb->var.yres = SCREEN_Y_RES;
-		sfb->fb->var.bits_per_pixel = SCREEN_BPP;
-	}
-
-	big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
 	/* Map address and memory detection */
 	mmio_base = pci_resource_start(pdev, 0);
 	pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
 
+	smem_size = sm7xx_vram_probe(sfb);
+	dev_info(&pdev->dev, "%lu MiB of VRAM detected.\n",
+		 smem_size / 1048576);
+
 	switch (sfb->chip_id) {
 	case 0x710:
 	case 0x712:
 		sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
 		sfb->fb->fix.mmio_len = 0x00400000;
-		smem_size = SM712_VIDEOMEMORYSIZE;
 		sfb->lfb = ioremap(mmio_base, mmio_addr);
 		if (!sfb->lfb) {
 			dev_err(&pdev->dev,
@@ -1458,8 +1601,7 @@
 	case 0x720:
 		sfb->fb->fix.mmio_start = mmio_base;
 		sfb->fb->fix.mmio_len = 0x00200000;
-		smem_size = SM722_VIDEOMEMORYSIZE;
-		sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
+		sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size);
 		sfb->lfb = sfb->dp_regs + 0x00200000;
 		sfb->mmio = (smtc_regbaseaddress =
 		    sfb->dp_regs + 0x000c0000);
@@ -1476,6 +1618,9 @@
 		goto failed_fb;
 	}
 
+	/* probe and decide resolution */
+	sm7xx_resolution_probe(sfb);
+
 	/* can support 32 bpp */
 	if (15 == sfb->fb->var.bits_per_pixel)
 		sfb->fb->var.bits_per_pixel = 16;
@@ -1486,7 +1631,11 @@
 	if (err)
 		goto failed;
 
-	smtcfb_setmode(sfb);
+	/*
+	 * The screen would be temporarily garbled when sm712fb takes over
+	 * vesafb or VGA text mode. Zero the framebuffer.
+	 */
+	memset_io(sfb->lfb, 0, sfb->fb->fix.smem_len);
 
 	err = register_framebuffer(info);
 	if (err < 0)
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 150ce2a..732e9ab 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -215,6 +215,9 @@
 	 * hypervisor.
 	 */
 	lb_offset = param.local_vaddr & (PAGE_SIZE - 1);
+	if (param.count == 0 ||
+	    param.count > U64_MAX - lb_offset - PAGE_SIZE + 1)
+		return -EINVAL;
 	num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	/* Allocate the buffers we need */
@@ -334,8 +337,8 @@
 	struct fsl_hv_ioctl_prop param;
 	char __user *upath, *upropname;
 	void __user *upropval;
-	char *path = NULL, *propname = NULL;
-	void *propval = NULL;
+	char *path, *propname;
+	void *propval;
 	int ret = 0;
 
 	/* Get the parameters from the user. */
@@ -347,32 +350,30 @@
 	upropval = (void __user *)(uintptr_t)param.propval;
 
 	path = strndup_user(upath, FH_DTPROP_MAX_PATHLEN);
-	if (IS_ERR(path)) {
-		ret = PTR_ERR(path);
-		goto out;
-	}
+	if (IS_ERR(path))
+		return PTR_ERR(path);
 
 	propname = strndup_user(upropname, FH_DTPROP_MAX_PATHLEN);
 	if (IS_ERR(propname)) {
 		ret = PTR_ERR(propname);
-		goto out;
+		goto err_free_path;
 	}
 
 	if (param.proplen > FH_DTPROP_MAX_PROPLEN) {
 		ret = -EINVAL;
-		goto out;
+		goto err_free_propname;
 	}
 
 	propval = kmalloc(param.proplen, GFP_KERNEL);
 	if (!propval) {
 		ret = -ENOMEM;
-		goto out;
+		goto err_free_propname;
 	}
 
 	if (set) {
 		if (copy_from_user(propval, upropval, param.proplen)) {
 			ret = -EFAULT;
-			goto out;
+			goto err_free_propval;
 		}
 
 		param.ret = fh_partition_set_dtprop(param.handle,
@@ -391,7 +392,7 @@
 			if (copy_to_user(upropval, propval, param.proplen) ||
 			    put_user(param.proplen, &p->proplen)) {
 				ret = -EFAULT;
-				goto out;
+				goto err_free_propval;
 			}
 		}
 	}
@@ -399,10 +400,12 @@
 	if (put_user(param.ret, &p->ret))
 		ret = -EFAULT;
 
-out:
-	kfree(path);
+err_free_propval:
 	kfree(propval);
+err_free_propname:
 	kfree(propname);
+err_free_path:
+	kfree(path);
 
 	return ret;
 }
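
The restructuring replaces a single out: label, which freed every buffer on every path and was correct only because the pointers were pre-initialized to NULL and kfree(NULL) is a no-op, with the conventional reverse-order unwind: each allocation gets its own label and a failure jumps to the label that releases only what already exists. The idiom in isolation (hypothetical bufs_alloc(), arbitrary sizes):

	struct bufs { void *a, *b, *c; };

	static int bufs_alloc(struct bufs *bufs)
	{
		int ret;

		bufs->a = kmalloc(64, GFP_KERNEL);
		if (!bufs->a)
			return -ENOMEM;

		bufs->b = kmalloc(64, GFP_KERNEL);
		if (!bufs->b) {
			ret = -ENOMEM;
			goto err_free_a;
		}

		bufs->c = kmalloc(64, GFP_KERNEL);
		if (!bufs->c) {
			ret = -ENOMEM;
			goto err_free_b;
		}

		return 0;

	err_free_b:
		kfree(bufs->b);
	err_free_a:
		kfree(bufs->a);
		return ret;
	}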
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 8977f40..2f09294 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1040,6 +1040,8 @@
 					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
 		if (queue)
 			break;
+		if (!may_reduce_num)
+			return NULL;
 	}
 
 	if (!num)
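
For context, the new test sits inside vring_create_virtqueue()'s allocate-or-shrink loop, which halves the ring until a contiguous allocation succeeds. Before the fix the may_reduce_num argument was ignored here, so a caller that required an exact ring size could silently receive a smaller one; now the allocation failure is reported instead. The loop, condensed as I read it:

	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = alloc_pages_exact(vring_size(num, vring_align),
					  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;	/* caller insisted on exactly num */
	}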
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 59d74d1..2287e1b 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1039,15 +1039,15 @@
 	/* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
 	alt = 3;
 	err = usb_set_interface(dev->udev,
-		intf->altsetting[alt].desc.bInterfaceNumber, alt);
+		intf->cur_altsetting->desc.bInterfaceNumber, alt);
 	if (err) {
 		dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
 			"for %d interface: err=%d.\n", alt,
-			intf->altsetting[alt].desc.bInterfaceNumber, err);
+			intf->cur_altsetting->desc.bInterfaceNumber, err);
 		goto err_out_clear;
 	}
 
-	iface_desc = &intf->altsetting[alt];
+	iface_desc = intf->cur_altsetting;
 	if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
 		pr_info("Num endpoints=%d. It is not DS9490R.\n",
 			iface_desc->desc.bNumEndpoints);
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 072e759..a8ff430 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -59,6 +59,8 @@
 	Opt_cache_loose, Opt_fscache, Opt_mmap,
 	/* Access options */
 	Opt_access, Opt_posixacl,
+	/* Lock timeout option */
+	Opt_locktimeout,
 	/* Error token */
 	Opt_err
 };
@@ -78,6 +80,7 @@
 	{Opt_cachetag, "cachetag=%s"},
 	{Opt_access, "access=%s"},
 	{Opt_posixacl, "posixacl"},
+	{Opt_locktimeout, "locktimeout=%u"},
 	{Opt_err, NULL}
 };
 
@@ -126,6 +129,7 @@
 #ifdef CONFIG_9P_FSCACHE
 	v9ses->cachetag = NULL;
 #endif
+	v9ses->session_lock_timeout = P9_LOCK_TIMEOUT;
 
 	if (!opts)
 		return 0;
@@ -298,6 +302,23 @@
 #endif
 			break;
 
+		case Opt_locktimeout:
+			r = match_int(&args[0], &option);
+			if (r < 0) {
+				p9_debug(P9_DEBUG_ERROR,
+					 "integer field, but no integer?\n");
+				ret = r;
+				continue;
+			}
+			if (option < 1) {
+				p9_debug(P9_DEBUG_ERROR,
+					 "locktimeout must be an integer greater than zero.\n");
+				ret = -EINVAL;
+				continue;
+			}
+			v9ses->session_lock_timeout = (long)option * HZ;
+			break;
+
 		default:
 			continue;
 		}
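
With this option the retry interval for blocked POSIX locks, previously hard-wired to the 30-second P9_LOCK_TIMEOUT, becomes tunable per mount. A hypothetical invocation (server address made up):

	# retry blocked byte-range locks every 5 seconds
	mount -t 9p -o trans=tcp,locktimeout=5 192.168.1.2 /mnt/9p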
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 443d12e..ce6ca9f 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -116,6 +116,7 @@
 	struct list_head slist; /* list of sessions registered with v9fs */
 	struct backing_dev_info bdi;
 	struct rw_semaphore rename_sem;
+	long session_lock_timeout; /* retry interval for blocking locks */
 };
 
 /* cache_validity flags */
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 5a0db6d..aaee1e6 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -40,6 +40,9 @@
  */
 #define P9_LOCK_TIMEOUT (30*HZ)
 
+/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */
+#define V9FS_STAT2INODE_KEEP_ISIZE 1
+
 extern struct file_system_type v9fs_fs_type;
 extern const struct address_space_operations v9fs_addr_operations;
 extern const struct file_operations v9fs_file_operations;
@@ -61,8 +64,10 @@
 		    struct inode *inode, umode_t mode, dev_t);
 void v9fs_evict_inode(struct inode *inode);
 ino_t v9fs_qid2ino(struct p9_qid *qid);
-void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
-void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *);
+void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
+		      struct super_block *sb, unsigned int flags);
+void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
+			   unsigned int flags);
 int v9fs_dir_release(struct inode *inode, struct file *filp);
 int v9fs_file_open(struct inode *inode, struct file *file);
 void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
@@ -83,4 +88,18 @@
 }
 
 int v9fs_open_to_dotl_flags(int flags);
+
+static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size)
+{
+	/*
+	 * 32-bit systems need the lock: concurrent updates could break the
+	 * sequence counts and make i_size_read() loop forever.
+	 * 64-bit updates are atomic and can skip the locking.
+	 */
+	if (sizeof(i_size) > sizeof(long))
+		spin_lock(&inode->i_lock);
+	i_size_write(inode, i_size);
+	if (sizeof(i_size) > sizeof(long))
+		spin_unlock(&inode->i_lock);
+}
 #endif
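
Both sizeof tests are compile-time constants, so on 64-bit builds the branches and the spin_lock/spin_unlock pair disappear and the helper reduces to a plain i_size_write(); only 32-bit builds, where loff_t is wider than long and i_size is protected by a seqcount, pay for the lock. Call sites simply swap the helper in wherever they stored the size directly, as the vfs_file.c hunk below does; condensed:

	loff_t i_size = i_size_read(inode);

	if (iocb->ki_pos > i_size) {
		inode_add_bytes(inode, iocb->ki_pos - i_size);
		v9fs_i_size_write(inode, iocb->ki_pos);	/* locked on 32-bit */
	}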
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 48db9a9..cb6c403 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -105,7 +105,6 @@
 	int err = 0;
 	struct p9_fid *fid;
 	int buflen;
-	int reclen = 0;
 	struct p9_rdir *rdir;
 	struct kvec kvec;
 
@@ -138,11 +137,10 @@
 		while (rdir->head < rdir->tail) {
 			err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
 					  rdir->tail - rdir->head, &st);
-			if (err) {
+			if (err <= 0) {
 				p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
 				return -EIO;
 			}
-			reclen = st.size+2;
 
 			over = !dir_emit(ctx, st.name, strlen(st.name),
 					 v9fs_qid2ino(&st.qid), dt_type(&st));
@@ -150,8 +148,8 @@
 			if (over)
 				return 0;
 
-			rdir->head += reclen;
-			ctx->pos += reclen;
+			rdir->head += err;
+			ctx->pos += err;
 		}
 	}
 }
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 398a3ed..79ff727 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -154,6 +154,7 @@
 	uint8_t status = P9_LOCK_ERROR;
 	int res = 0;
 	unsigned char fl_type;
+	struct v9fs_session_info *v9ses;
 
 	fid = filp->private_data;
 	BUG_ON(fid == NULL);
@@ -189,6 +190,8 @@
 	if (IS_SETLKW(cmd))
 		flock.flags = P9_LOCK_FLAGS_BLOCK;
 
+	v9ses = v9fs_inode2v9ses(file_inode(filp));
+
 	/*
 	 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
 	 * for lock request, keep on trying
@@ -202,7 +205,8 @@
 			break;
 		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
 			break;
-		if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
+		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
+				!= 0)
 			break;
 		/*
 		 * p9_client_lock_dotl overwrites flock.client_id with the
@@ -442,7 +446,11 @@
 		i_size = i_size_read(inode);
 		if (iocb->ki_pos > i_size) {
 			inode_add_bytes(inode, iocb->ki_pos - i_size);
-			i_size_write(inode, iocb->ki_pos);
+			/*
+			 * Need to serialize against i_size_write() in
+			 * v9fs_stat2inode()
+			 */
+			v9fs_i_size_write(inode, iocb->ki_pos);
 		}
 		return retval;
 	}
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index f8ab4a6..ddd1eb6 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -538,7 +538,7 @@
 	if (retval)
 		goto error;
 
-	v9fs_stat2inode(st, inode, sb);
+	v9fs_stat2inode(st, inode, sb, 0);
 	v9fs_cache_inode_get_cookie(inode);
 	unlock_new_inode(inode);
 	return inode;
@@ -1078,7 +1078,7 @@
 	if (IS_ERR(st))
 		return PTR_ERR(st);
 
-	v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb);
+	v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
 	generic_fillattr(d_inode(dentry), stat);
 
 	p9stat_free(st);
@@ -1156,12 +1156,13 @@
  * @stat: Plan 9 metadata (mistat) structure
  * @inode: inode to populate
  * @sb: superblock of filesystem
+ * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
  *
  */
 
 void
 v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
-	struct super_block *sb)
+		 struct super_block *sb, unsigned int flags)
 {
 	umode_t mode;
 	char ext[32];
@@ -1202,10 +1203,11 @@
 	mode = p9mode2perm(v9ses, stat);
 	mode |= inode->i_mode & ~S_IALLUGO;
 	inode->i_mode = mode;
-	i_size_write(inode, stat->length);
 
+	if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
+		v9fs_i_size_write(inode, stat->length);
 	/* not real number of blocks, but 512 byte ones ... */
-	inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
+	inode->i_blocks = (stat->length + 512 - 1) >> 9;
 	v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
 }
 
@@ -1402,9 +1404,9 @@
 {
 	int umode;
 	dev_t rdev;
-	loff_t i_size;
 	struct p9_wstat *st;
 	struct v9fs_session_info *v9ses;
+	unsigned int flags;
 
 	v9ses = v9fs_inode2v9ses(inode);
 	st = p9_client_stat(fid);
@@ -1417,16 +1419,13 @@
 	if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
 		goto out;
 
-	spin_lock(&inode->i_lock);
 	/*
 	 * We don't want to refresh inode->i_size,
 	 * because we may have cached data
 	 */
-	i_size = inode->i_size;
-	v9fs_stat2inode(st, inode, inode->i_sb);
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
-		inode->i_size = i_size;
-	spin_unlock(&inode->i_lock);
+	flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+		V9FS_STAT2INODE_KEEP_ISIZE : 0;
+	v9fs_stat2inode(st, inode, inode->i_sb, flags);
 out:
 	p9stat_free(st);
 	kfree(st);
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index c3dd0d4..425bc1a 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -143,7 +143,7 @@
 	if (retval)
 		goto error;
 
-	v9fs_stat2inode_dotl(st, inode);
+	v9fs_stat2inode_dotl(st, inode, 0);
 	v9fs_cache_inode_get_cookie(inode);
 	retval = v9fs_get_acl(inode, fid);
 	if (retval)
@@ -496,7 +496,7 @@
 	if (IS_ERR(st))
 		return PTR_ERR(st);
 
-	v9fs_stat2inode_dotl(st, d_inode(dentry));
+	v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
 	generic_fillattr(d_inode(dentry), stat);
 	/* Change block size to what the server returned */
 	stat->blksize = st->st_blksize;
@@ -607,11 +607,13 @@
  * v9fs_stat2inode_dotl - populate an inode structure with stat info
  * @stat: stat structure
  * @inode: inode to populate
+ * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
  *
  */
 
 void
-v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
+v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
+		      unsigned int flags)
 {
 	umode_t mode;
 	struct v9fs_inode *v9inode = V9FS_I(inode);
@@ -631,7 +633,8 @@
 		mode |= inode->i_mode & ~S_IALLUGO;
 		inode->i_mode = mode;
 
-		i_size_write(inode, stat->st_size);
+		if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
+			v9fs_i_size_write(inode, stat->st_size);
 		inode->i_blocks = stat->st_blocks;
 	} else {
 		if (stat->st_result_mask & P9_STATS_ATIME) {
@@ -661,8 +664,9 @@
 		}
 		if (stat->st_result_mask & P9_STATS_RDEV)
 			inode->i_rdev = new_decode_dev(stat->st_rdev);
-		if (stat->st_result_mask & P9_STATS_SIZE)
-			i_size_write(inode, stat->st_size);
+		if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
+		    stat->st_result_mask & P9_STATS_SIZE)
+			v9fs_i_size_write(inode, stat->st_size);
 		if (stat->st_result_mask & P9_STATS_BLOCKS)
 			inode->i_blocks = stat->st_blocks;
 	}
@@ -928,9 +932,9 @@
 
 int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
 {
-	loff_t i_size;
 	struct p9_stat_dotl *st;
 	struct v9fs_session_info *v9ses;
+	unsigned int flags;
 
 	v9ses = v9fs_inode2v9ses(inode);
 	st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
@@ -942,16 +946,13 @@
 	if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
 		goto out;
 
-	spin_lock(&inode->i_lock);
 	/*
 	 * We don't want to refresh inode->i_size,
 	 * because we may have cached data
 	 */
-	i_size = inode->i_size;
-	v9fs_stat2inode_dotl(st, inode);
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
-		inode->i_size = i_size;
-	spin_unlock(&inode->i_lock);
+	flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+		V9FS_STAT2INODE_KEEP_ISIZE : 0;
+	v9fs_stat2inode_dotl(st, inode, flags);
 out:
 	kfree(st);
 	return 0;
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index de3ed86..06ff1a9 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -165,7 +165,7 @@
 			goto release_sb;
 		}
 		d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
-		v9fs_stat2inode_dotl(st, d_inode(root));
+		v9fs_stat2inode_dotl(st, d_inode(root), 0);
 		kfree(st);
 	} else {
 		struct p9_wstat *st = NULL;
@@ -176,7 +176,7 @@
 		}
 
 		d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
-		v9fs_stat2inode(st, d_inode(root), sb);
+		v9fs_stat2inode(st, d_inode(root), sb, 0);
 
 		p9stat_free(st);
 		kfree(st);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 85dc7ab..2973d25 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2018,14 +2018,20 @@
 			extent_item_objectid);
 
 	if (!search_commit_root) {
-		trans = btrfs_join_transaction(fs_info->extent_root);
-		if (IS_ERR(trans))
-			return PTR_ERR(trans);
-		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
-	} else {
-		down_read(&fs_info->commit_root_sem);
+		trans = btrfs_attach_transaction(fs_info->extent_root);
+		if (IS_ERR(trans)) {
+			if (PTR_ERR(trans) != -ENOENT &&
+			    PTR_ERR(trans) != -EROFS)
+				return PTR_ERR(trans);
+			trans = NULL;
+		}
 	}
 
+	if (trans)
+		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+	else
+		down_read(&fs_info->commit_root_sem);
+
 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
 				   tree_mod_seq_elem.seq, &refs,
 				   &extent_item_pos);
@@ -2056,7 +2062,7 @@
 
 	free_leaf_list(refs);
 out:
-	if (!search_commit_root) {
+	if (trans) {
 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
 		btrfs_end_transaction(trans, fs_info->extent_root);
 	} else {
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7938c48..6b29165 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -11150,9 +11150,9 @@
  * transaction.
  */
 static int btrfs_trim_free_extents(struct btrfs_device *device,
-				   u64 minlen, u64 *trimmed)
+				   struct fstrim_range *range, u64 *trimmed)
 {
-	u64 start = 0, len = 0;
+	u64 start = range->start, len = 0;
 	int ret;
 
 	*trimmed = 0;
@@ -11188,8 +11188,8 @@
 			atomic_inc(&trans->use_count);
 		spin_unlock(&fs_info->trans_lock);
 
-		ret = find_free_dev_extent_start(trans, device, minlen, start,
-						 &start, &len);
+		ret = find_free_dev_extent_start(trans, device, range->minlen,
+						 start, &start, &len);
 		if (trans)
 			btrfs_put_transaction(trans);
 
@@ -11201,6 +11201,16 @@
 			break;
 		}
 
+		/* If we are out of the passed range, break */
+		if (start > range->start + range->len - 1) {
+			mutex_unlock(&fs_info->chunk_mutex);
+			ret = 0;
+			break;
+		}
+
+		start = max(range->start, start);
+		len = min(range->len, len);
+
 		ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
 		up_read(&fs_info->commit_root_sem);
 		mutex_unlock(&fs_info->chunk_mutex);
@@ -11211,6 +11221,10 @@
 		start += len;
 		*trimmed += bytes;
 
+		/* We've trimmed enough */
+		if (*trimmed >= range->len)
+			break;
+
 		if (fatal_signal_pending(current)) {
 			ret = -ERESTARTSYS;
 			break;
@@ -11295,8 +11309,7 @@
 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 	devices = &fs_info->fs_devices->devices;
 	list_for_each_entry(device, devices, dev_list) {
-		ret = btrfs_trim_free_extents(device, range->minlen,
-					      &group_trimmed);
+		ret = btrfs_trim_free_extents(device, range, &group_trimmed);
 		if (ret) {
 			dev_failed++;
 			dev_ret = ret;
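
The added lines intersect each free extent the search returns with the caller's range: stop once the cursor has passed the end of the request, then clamp the start upward and the length downward. As a standalone helper the same intersection would look roughly like this (hypothetical clamp_to_range(), assuming req_start + req_len does not overflow u64):

	static bool clamp_to_range(u64 *start, u64 *len,
				   u64 req_start, u64 req_len)
	{
		u64 req_end = req_start + req_len;

		if (*start >= req_end)
			return false;		/* candidate is past the request */

		*start = max(*start, req_start);
		*len = min(*len, req_end - *start);
		return true;
	}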
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f605bfa..84051b2 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3018,11 +3018,11 @@
 		 */
 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
 		    prev_em_start && *prev_em_start != (u64)-1 &&
-		    *prev_em_start != em->orig_start)
+		    *prev_em_start != em->start)
 			force_bio_submit = true;
 
 		if (prev_em_start)
-			*prev_em_start = em->orig_start;
+			*prev_em_start = em->start;
 
 		free_extent_map(em);
 		em = NULL;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 242584a..a67143c 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -385,6 +385,16 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
+	/*
+	 * If the fs is mounted with nologreplay, which requires it to be
+	 * mounted in RO mode as well, we cannot allow discard on free space
+	 * inside block groups, because log trees refer to extents that are not
+	 * pinned in a block group's free space cache (pinning the extents is
+	 * precisely the first phase of replaying a log tree).
+	 */
+	if (btrfs_test_opt(fs_info, NOLOGREPLAY))
+		return -EROFS;
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
 				dev_list) {
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index af6a776..5aa07de 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2395,8 +2395,9 @@
 			bitmap_clear(rbio->dbitmap, pagenr, 1);
 		kunmap(p);
 
-		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
+		for (stripe = 0; stripe < nr_data; stripe++)
 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+		kunmap(p_page);
 	}
 
 	__free_page(p_page);
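
The fix restores strict kmap()/kunmap() pairing: the loop had been unmapping real_stripes pages when only the nr_data data stripes were mapped in this pass, and the parity page p_page was mapped without ever being unmapped. The discipline in miniature (pages[] and process() are placeholders):

	for (i = 0; i < nr_mapped; i++) {
		void *addr = kmap(pages[i]);	/* temporary highmem mapping */

		process(addr);
		kunmap(pages[i]);	/* unmap the same page, exactly once */
	}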
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 47d11a3..a36bb75 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3343,9 +3343,16 @@
 	}
 	btrfs_release_path(path);
 
-	/* find the first key from this transaction again */
+	/*
+	 * Find the first key from this transaction again.  See the note for
+	 * log_new_dir_dentries, if we're logging a directory recursively we
+	 * won't be holding its i_mutex, which means we can modify the directory
+	 * while we're logging it.  If we remove an entry between our first
+	 * search and this search we'll not find the key again and can just
+	 * bail.
+	 */
 	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
-	if (WARN_ON(ret != 0))
+	if (ret != 0)
 		goto done;
 
 	/*
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1fe447e..dbb78c3 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6439,10 +6439,10 @@
 	}
 
 	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
-	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
+	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
 	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
 	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
-	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
+	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
 	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
 	     num_stripes != 1)) {
 		btrfs_err(root->fs_info,
diff --git a/fs/buffer.c b/fs/buffer.c
index 3477a14..9915966 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3077,6 +3077,13 @@
 	/* Uhhuh. We've got a bio that straddles the device size! */
 	truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
 
+	/*
+	 * The bio contains more than one segment spanning EOD; just return
+	 * and let the IO layer turn it into an EIO
+	 */
+	if (truncated_bytes > bvec->bv_len)
+		return;
+
 	/* Truncate the bio.. */
 	bio->bi_iter.bi_size -= truncated_bytes;
 	bvec->bv_len -= truncated_bytes;
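
A worked instance of the new guard: take an 8-sector (4 KiB) device and a bio of four 512-byte segments starting at sector 6. Only two sectors fit before end-of-device, so the overhang is 1024 bytes, larger than the 512-byte final bvec; shrinking that one segment cannot absorb it, so the function returns and the block layer fails the bio with EIO rather than leaving bi_size inconsistent. In numbers:

	/* hypothetical: 8-sector device, bio at sector 6, bi_size = 2048 */
	maxsector = 8 - 6;				/* 2 sectors remain  */
	truncated_bytes = 2048 - (maxsector << 9);	/* = 1024 bytes      */
	/* last bvec holds 512 bytes: 1024 > 512, so bail out -> EIO */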
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index cec2569..2ffc7fe 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1471,6 +1471,7 @@
 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 {
 	struct ceph_inode_info *dci = ceph_inode(dir);
+	unsigned hash;
 
 	switch (dci->i_dir_layout.dl_dir_hash) {
 	case 0:	/* for backward compat */
@@ -1478,8 +1479,11 @@
 		return dn->d_name.hash;
 
 	default:
-		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+		spin_lock(&dn->d_lock);
+		hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
 				     dn->d_name.name, dn->d_name.len);
+		spin_unlock(&dn->d_lock);
+		return hash;
 	}
 }
 
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 30d9d9e..7a40525 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -523,6 +523,7 @@
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
+	kfree(ci->i_symlink);
 	kmem_cache_free(ceph_inode_cachep, ci);
 }
 
@@ -554,7 +555,6 @@
 		ceph_put_snap_realm(mdsc, realm);
 	}
 
-	kfree(ci->i_symlink);
 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
 		frag = rb_entry(n, struct ceph_inode_frag, node);
 		rb_erase(n, &ci->i_fragtree);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 6cbd0d8..67cb9d0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1187,6 +1187,15 @@
 			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
 			ci->i_prealloc_cap_flush = NULL;
 		}
+
+		if (drop &&
+		    ci->i_wrbuffer_ref_head == 0 &&
+		    ci->i_wr_ref == 0 &&
+		    ci->i_dirty_caps == 0 &&
+		    ci->i_flushing_caps == 0) {
+			ceph_put_snap_context(ci->i_head_snapc);
+			ci->i_head_snapc = NULL;
+		}
 	}
 	spin_unlock(&ci->i_ceph_lock);
 	while (!list_empty(&to_remove)) {
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 411e9df..3a76ae0 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -563,7 +563,12 @@
 	old_snapc = NULL;
 
 update_snapc:
-	if (ci->i_head_snapc) {
+	if (ci->i_wrbuffer_ref_head == 0 &&
+	    ci->i_wr_ref == 0 &&
+	    ci->i_dirty_caps == 0 &&
+	    ci->i_flushing_caps == 0) {
+		ci->i_head_snapc = NULL;
+	} else {
 		ci->i_head_snapc = ceph_get_snap_context(new_snapc);
 		dout(" new snapc is %p\n", new_snapc);
 	}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 2a89030..c42cbd1 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -742,6 +742,12 @@
 	return;
 }
 
+static int ceph_remount(struct super_block *sb, int *flags, char *data)
+{
+	sync_filesystem(sb);
+	return 0;
+}
+
 static const struct super_operations ceph_super_ops = {
 	.alloc_inode	= ceph_alloc_inode,
 	.destroy_inode	= ceph_destroy_inode,
@@ -750,6 +756,7 @@
 	.evict_inode	= ceph_evict_inode,
 	.sync_fs        = ceph_sync_fs,
 	.put_super	= ceph_put_super,
+	.remount_fs	= ceph_remount,
 	.show_options   = ceph_show_options,
 	.statfs		= ceph_statfs,
 	.umount_begin   = ceph_umount_begin,
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 9156be5..4660208 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -271,9 +271,9 @@
 {
 	cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name);
 	cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name);
-	cifs_dbg(FYI, "DFS: fl: %hd, srv_type: %hd\n",
+	cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n",
 		 ref->flags, ref->server_type);
-	cifs_dbg(FYI, "DFS: ref_flags: %hd, path_consumed: %hd\n",
+	cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n",
 		 ref->ref_flag, ref->path_consumed);
 }
 
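
The fields printed here are declared as plain ints in struct dfs_info3_param, so %hd, which tells the format checker to expect a value that originated as short, was mismatched; the conversion specifier has to track the field's declared type. In miniature:

	short s = 3;
	int i = 4;
	unsigned int u = 5;

	printk(KERN_DEBUG "s=%hd i=%d u=%u\n", s, i, u);	/* matched */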
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4ed4736..5367b684 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1157,6 +1157,7 @@
 }
 
 struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler);
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
 
 #define CIFS_CACHE_READ_FLG	1
@@ -1651,6 +1652,7 @@
 #endif /* CONFIG_CIFS_ACL */
 
 void cifs_oplock_break(struct work_struct *work);
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
 
 extern const struct slow_work_ops cifs_oplock_break_ops;
 extern struct workqueue_struct *cifsiod_wq;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5b287d3..4b350ac 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1201,6 +1201,11 @@
 	const char *delims = "/\\";
 	size_t len;
 
+	if (unlikely(!devname || !*devname)) {
+		cifs_dbg(VFS, "Device name not specified.\n");
+		return -EINVAL;
+	}
+
 	/* make sure we have a valid UNC double delimiter prefix */
 	len = strspn(devname, delims);
 	if (len != 2)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8ec2963..e7f1773 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -358,13 +358,31 @@
 	return cifs_file;
 }
 
-/*
- * Release a reference on the file private data. This may involve closing
- * the filehandle out on the server. Must be called without holding
- * tcon->open_file_lock and cifs_file->file_info_lock.
+/**
+ * cifsFileInfo_put - release a reference to file private data
+ *
+ * Always potentially waits for the oplock break handler. See
+ * _cifsFileInfo_put().
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
+	_cifsFileInfo_put(cifs_file, true);
+}
+
+/**
+ * _cifsFileInfo_put - release a reference to file private data
+ *
+ * This may involve closing the filehandle @cifs_file out on the
+ * server. Must be called without holding tcon->open_file_lock and
+ * cifs_file->file_info_lock.
+ *
+ * If @wait_oplock_handler is true and we are releasing the last
+ * reference, wait for any running oplock break handler of the file
+ * and cancel any pending one. When calling this function from the
+ * oplock break handler itself, pass false.
+ *
+ */
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+{
 	struct inode *inode = d_inode(cifs_file->dentry);
 	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
 	struct TCP_Server_Info *server = tcon->ses->server;
@@ -411,7 +429,8 @@
 
 	spin_unlock(&tcon->open_file_lock);
 
-	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+	oplock_break_cancelled = wait_oplock_handler ?
+		cancel_work_sync(&cifs_file->oplock_break) : false;
 
 	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
 		struct TCP_Server_Info *server = tcon->ses->server;
@@ -1627,8 +1646,20 @@
 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
 
 out:
-	if (flock->fl_flags & FL_POSIX && !rc)
+	if (flock->fl_flags & FL_POSIX) {
+		/*
+		 * If this is a request to remove all locks because we
+		 * are closing the file, it doesn't matter if the
+		 * unlocking failed as both cifs.ko and the SMB server
+		 * remove the lock on file close
+		 */
+		if (rc) {
+			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
+			if (!(flock->fl_flags & FL_CLOSE))
+				return rc;
+		}
 		rc = locks_lock_file_wait(file, flock);
+	}
 	return rc;
 }
 
@@ -2797,14 +2828,16 @@
 	 * these pages but not on the region from pos to ppos+len-1.
 	 */
 	written = cifs_user_writev(iocb, from);
-	if (written > 0 && CIFS_CACHE_READ(cinode)) {
+	if (CIFS_CACHE_READ(cinode)) {
 		/*
-		 * Windows 7 server can delay breaking level2 oplock if a write
-		 * request comes - break it on the client to prevent reading
-		 * an old data.
+		 * We have read level caching and we have just sent a write
+		 * request to the server thus making data in the cache stale.
+		 * Zap the cache and set oplock/lease level to NONE to avoid
+		 * reading stale data from the cache. All subsequent read
+		 * operations will read new data from the server.
 		 */
 		cifs_zap_mapping(inode);
-		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
+		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
 			 inode);
 		cinode->oplock = 0;
 	}
@@ -3899,6 +3932,7 @@
 							     cinode);
 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
 	}
+	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */);
 	cifs_done_oplock_break(cinode);
 }
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 57c938f..786f67b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -771,43 +771,50 @@
 	} else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
 		   (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
 		      == 0)) {
-			/*
-			 * For SMB2 and later the backup intent flag is already
-			 * sent if needed on open and there is no path based
-			 * FindFirst operation to use to retry with
-			 */
+		/*
+		 * For SMB2 and later the backup intent flag is already
+		 * sent if needed on open and there is no path based
+		 * FindFirst operation to use to retry with
+		 */
 
-			srchinf = kzalloc(sizeof(struct cifs_search_info),
-						GFP_KERNEL);
-			if (srchinf == NULL) {
-				rc = -ENOMEM;
-				goto cgii_exit;
-			}
+		srchinf = kzalloc(sizeof(struct cifs_search_info),
+					GFP_KERNEL);
+		if (srchinf == NULL) {
+			rc = -ENOMEM;
+			goto cgii_exit;
+		}
 
-			srchinf->endOfSearch = false;
+		srchinf->endOfSearch = false;
+		if (tcon->unix_ext)
+			srchinf->info_level = SMB_FIND_FILE_UNIX;
+		else if ((tcon->ses->capabilities &
+			 tcon->ses->server->vals->cap_nt_find) == 0)
+			srchinf->info_level = SMB_FIND_FILE_INFO_STANDARD;
+		else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
 			srchinf->info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+		else /* no srvino useful for fallback to some netapp */
+			srchinf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
 
-			srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
-					CIFS_SEARCH_CLOSE_AT_END |
-					CIFS_SEARCH_BACKUP_SEARCH;
+		srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
+				CIFS_SEARCH_CLOSE_AT_END |
+				CIFS_SEARCH_BACKUP_SEARCH;
 
-			rc = CIFSFindFirst(xid, tcon, full_path,
-				cifs_sb, NULL, srchflgs, srchinf, false);
-			if (!rc) {
-				data =
-				(FILE_ALL_INFO *)srchinf->srch_entries_start;
+		rc = CIFSFindFirst(xid, tcon, full_path,
+			cifs_sb, NULL, srchflgs, srchinf, false);
+		if (!rc) {
+			data = (FILE_ALL_INFO *)srchinf->srch_entries_start;
 
-				cifs_dir_info_to_fattr(&fattr,
-				(FILE_DIRECTORY_INFO *)data, cifs_sb);
-				fattr.cf_uniqueid = le64_to_cpu(
-				((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
-				validinum = true;
+			cifs_dir_info_to_fattr(&fattr,
+			(FILE_DIRECTORY_INFO *)data, cifs_sb);
+			fattr.cf_uniqueid = le64_to_cpu(
+			((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
+			validinum = true;
 
-				cifs_buf_release(srchinf->ntwrk_buf_start);
-			}
-			kfree(srchinf);
-			if (rc)
-				goto cgii_exit;
+			cifs_buf_release(srchinf->ntwrk_buf_start);
+		}
+		kfree(srchinf);
+		if (rc)
+			goto cgii_exit;
 	} else
 		goto cgii_exit;
 
@@ -1715,6 +1722,10 @@
 	if (rc == 0 || rc != -EBUSY)
 		goto do_rename_exit;
 
+	/* Don't fall back to using SMB on SMB 2+ mount */
+	if (server->vals->protocol_id != 0)
+		goto do_rename_exit;
+
 	/* open-file renames don't work across directories */
 	if (to_dentry->d_parent != from_dentry->d_parent)
 		goto do_rename_exit;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 50559a8..5e75df6 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -494,8 +494,7 @@
 					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
 					   &pCifsInode->flags);
 
-				queue_work(cifsoplockd_wq,
-					   &netfile->oplock_break);
+				cifs_queue_oplock_break(netfile);
 				netfile->oplock_break_cancelled = false;
 
 				spin_unlock(&tcon->open_file_lock);
@@ -592,6 +591,28 @@
 	spin_unlock(&cinode->writers_lock);
 }
 
+/**
+ * cifs_queue_oplock_break - queue the oplock break handler for cfile
+ *
+ * This function is called from the demultiplex thread when it
+ * receives an oplock break for @cfile.
+ *
+ * Assumes the tcon->open_file_lock is held.
+ * Assumes cfile->file_info_lock is NOT held.
+ */
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
+{
+	/*
+	 * Bump the handle refcount now while we hold the
+	 * open_file_lock to enforce the validity of it for the oplock
+	 * break handler. The matching put is done at the end of the
+	 * handler.
+	 */
+	cifsFileInfo_get(cfile);
+
+	queue_work(cifsoplockd_wq, &cfile->oplock_break);
+}
+
 void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
 {
 	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
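
cifs_queue_oplock_break() pairs with the _cifsFileInfo_put(cfile, false) added at the end of cifs_oplock_break() in the fs/cifs/file.c hunk earlier: the reference taken here, while open_file_lock still protects the handle, keeps it alive for as long as the work item is pending or running, and the handler passes false so it never cancel_work_sync()s itself. The general pin-across-async-work shape, with hypothetical obj_get()/obj_put():

	static void queue_async(struct obj *o)
	{
		obj_get(o);			/* reference owned by the worker */
		queue_work(system_wq, &o->work);
	}

	static void work_fn(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, work);

		/* ... use o; the queued reference keeps it alive ... */
		obj_put(o);			/* matching put, not a wait */
	}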
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index efd72e1..f7a9ada 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -305,7 +305,7 @@
 	remaining = tgt_total_cnt - total_in_tgt;
 
 	if (remaining < 0) {
-		cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%hu\n",
+		cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%u\n",
 			 tgt_total_cnt, total_in_tgt);
 		return -EPROTO;
 	}
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 98c25b9..7e93d57 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -1034,7 +1034,8 @@
 	{STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
 	"STATUS_UNFINISHED_CONTEXT_DELETED"},
 	{STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
-	{STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
+	/* Note that ENOATTR and ENODATA are the same errno */
+	{STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
 	{STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
 	{STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
 	"STATUS_WRONG_CREDENTIAL_HANDLE"},
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index e96a74d..9994d15 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -474,7 +474,6 @@
 	__u8 lease_state;
 	struct list_head *tmp;
 	struct cifsFileInfo *cfile;
-	struct TCP_Server_Info *server = tcon->ses->server;
 	struct cifs_pending_open *open;
 	struct cifsInodeInfo *cinode;
 	int ack_req = le32_to_cpu(rsp->Flags &
@@ -494,14 +493,26 @@
 		cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
 			 le32_to_cpu(rsp->NewLeaseState));
 
-		server->ops->set_oplock_level(cinode, lease_state, 0, NULL);
-
 		if (ack_req)
 			cfile->oplock_break_cancelled = false;
 		else
 			cfile->oplock_break_cancelled = true;
 
-		queue_work(cifsoplockd_wq, &cfile->oplock_break);
+		set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
+
+		/*
+		 * Set or clear flags depending on the lease state being READ.
+		 * HANDLE caching flag should be added when the client starts
+		 * to defer closing remote file handles with HANDLE leases.
+		 */
+		if (lease_state & SMB2_LEASE_READ_CACHING_HE)
+			set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+				&cinode->flags);
+		else
+			clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+				  &cinode->flags);
+
+		cifs_queue_oplock_break(cfile);
 		kfree(lw);
 		return true;
 	}
@@ -645,8 +656,8 @@
 					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
 					   &cinode->flags);
 				spin_unlock(&cfile->file_info_lock);
-				queue_work(cifsoplockd_wq,
-					   &cfile->oplock_break);
+
+				cifs_queue_oplock_break(cfile);
 
 				spin_unlock(&tcon->open_file_lock);
 				spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 2db7968..f7ad2a3 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1377,6 +1377,15 @@
 }
 
 static void
+smb21_downgrade_oplock(struct TCP_Server_Info *server,
+		       struct cifsInodeInfo *cinode, bool set_level2)
+{
+	server->ops->set_oplock_level(cinode,
+				      set_level2 ? SMB2_LEASE_READ_CACHING_HE :
+				      0, 0, NULL);
+}
+
+static void
 smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
 		      unsigned int epoch, bool *purge_cache)
 {
@@ -1404,26 +1413,28 @@
 		       unsigned int epoch, bool *purge_cache)
 {
 	char message[5] = {0};
+	unsigned int new_oplock = 0;
 
 	oplock &= 0xFF;
 	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
 		return;
 
-	cinode->oplock = 0;
 	if (oplock & SMB2_LEASE_READ_CACHING_HE) {
-		cinode->oplock |= CIFS_CACHE_READ_FLG;
+		new_oplock |= CIFS_CACHE_READ_FLG;
 		strcat(message, "R");
 	}
 	if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
-		cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
+		new_oplock |= CIFS_CACHE_HANDLE_FLG;
 		strcat(message, "H");
 	}
 	if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
-		cinode->oplock |= CIFS_CACHE_WRITE_FLG;
+		new_oplock |= CIFS_CACHE_WRITE_FLG;
 		strcat(message, "W");
 	}
-	if (!cinode->oplock)
-		strcat(message, "None");
+	if (!new_oplock)
+		strncpy(message, "None", sizeof(message));
+
+	cinode->oplock = new_oplock;
 	cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
 		 &cinode->vfs_inode);
 }
@@ -1681,7 +1692,7 @@
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
 	.handle_cancelled_mid = smb2_handle_cancelled_mid,
-	.downgrade_oplock = smb2_downgrade_oplock,
+	.downgrade_oplock = smb21_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
 	.negotiate_wsize = smb2_negotiate_wsize,
@@ -1765,7 +1776,7 @@
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
 	.handle_cancelled_mid = smb2_handle_cancelled_mid,
-	.downgrade_oplock = smb2_downgrade_oplock,
+	.downgrade_oplock = smb21_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
 	.negotiate_wsize = smb2_negotiate_wsize,
@@ -1855,7 +1866,7 @@
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
 	.handle_cancelled_mid = smb2_handle_cancelled_mid,
-	.downgrade_oplock = smb2_downgrade_oplock,
+	.downgrade_oplock = smb21_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
 	.negotiate_wsize = smb2_negotiate_wsize,
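
In the smb3_set_oplock_level() hunk above, accumulating into the local new_oplock and storing cinode->oplock once means a concurrent reader observes either the old state or the complete new one, never the transient 0 that the previous zero-then-OR sequence exposed; the strncpy change additionally bounds the write into the 5-byte message buffer explicitly. The essential shape, with hypothetical flag names:

	unsigned int new_state = 0;

	if (lease & LEASE_READ)
		new_state |= CACHE_READ;
	if (lease & LEASE_WRITE)
		new_state |= CACHE_WRITE;

	/* one store: readers see the old value or the new, never 0 */
	cinode->state = new_state;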
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
index 08b46e6..f0de238 100644
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -1,15 +1,16 @@
 config FS_ENCRYPTION
-	tristate "FS Encryption (Per-file encryption)"
+	bool "FS Encryption (Per-file encryption)"
 	select CRYPTO
 	select CRYPTO_AES
 	select CRYPTO_CBC
 	select CRYPTO_ECB
 	select CRYPTO_XTS
 	select CRYPTO_CTS
-	select CRYPTO_CTR
+	select CRYPTO_SHA256
 	select KEYS
 	help
 	  Enable encryption of files and directories.  This
 	  feature is similar to ecryptfs, but it is more memory
 	  efficient since it avoids caching the encrypted and
-	  decrypted pages in the page cache.
+	  decrypted pages in the page cache.  Currently Ext4,
+	  F2FS and UBIFS make use of this feature.
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 926e5df..56debb1 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -58,7 +58,7 @@
 		return err;
 
 	if (!fscrypt_has_permitted_context(dir, inode))
-		return -EPERM;
+		return -EXDEV;
 
 	return 0;
 }
@@ -82,13 +82,13 @@
 		if (IS_ENCRYPTED(new_dir) &&
 		    !fscrypt_has_permitted_context(new_dir,
 						   d_inode(old_dentry)))
-			return -EPERM;
+			return -EXDEV;
 
 		if ((flags & RENAME_EXCHANGE) &&
 		    IS_ENCRYPTED(old_dir) &&
 		    !fscrypt_has_permitted_context(old_dir,
 						   d_inode(new_dentry)))
-			return -EPERM;
+			return -EXDEV;
 	}
 	return 0;
 }
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 6a9c476..74e8d3d 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -150,8 +150,7 @@
  * malicious offline violations of this constraint, while the link and rename
  * checks are needed to prevent online violations of this constraint.
  *
- * Return: 1 if permitted, 0 if forbidden.  If forbidden, the caller must fail
- * the filesystem operation with EPERM.
+ * Return: 1 if permitted, 0 if forbidden.
  */
 int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
 {
diff --git a/fs/dcache.c b/fs/dcache.c
index 75bbe7d..96c150d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1522,7 +1522,7 @@
 {
 	struct detach_data *data = _data;
 
-	if (!data->mountpoint && !data->select.found)
+	if (!data->mountpoint && list_empty(&data->select.dispose))
 		__d_drop(data->select.start);
 }
 
@@ -1564,17 +1564,15 @@
 
 		d_walk(dentry, &data, detach_and_collect, check_and_drop);
 
-		if (data.select.found)
+		if (!list_empty(&data.select.dispose))
 			shrink_dentry_list(&data.select.dispose);
+		else if (!data.mountpoint)
+			return;
 
 		if (data.mountpoint) {
 			detach_mounts(data.mountpoint);
 			dput(data.mountpoint);
 		}
-
-		if (!data.mountpoint && !data.select.found)
-			break;
-
 		cond_resched();
 	}
 }
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index ab8b867..01cde9f 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -167,19 +167,24 @@
 	return 0;
 }
 
-static void debugfs_evict_inode(struct inode *inode)
+static void debugfs_i_callback(struct rcu_head *head)
 {
-	truncate_inode_pages_final(&inode->i_data);
-	clear_inode(inode);
+	struct inode *inode = container_of(head, struct inode, i_rcu);
 	if (S_ISLNK(inode->i_mode))
 		kfree(inode->i_link);
+	free_inode_nonrcu(inode);
+}
+
+static void debugfs_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, debugfs_i_callback);
 }
 
 static const struct super_operations debugfs_super_operations = {
 	.statfs		= simple_statfs,
 	.remount_fs	= debugfs_remount,
 	.show_options	= debugfs_show_options,
-	.evict_inode	= debugfs_evict_inode,
+	.destroy_inode	= debugfs_destroy_inode,
 };
 
 static void debugfs_release_dentry(struct dentry *dentry)
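
Moving the kfree() of i_link from evict_inode into a destroy_inode callback that goes through call_rcu() matters because lockless path walk may still dereference the symlink body under rcu_read_lock(); deferring the free past a grace period guarantees no such reader can still see the pointer. The deferred-free idiom with a hypothetical object:

	static void obj_free_rcu(struct rcu_head *head)
	{
		struct obj *o = container_of(head, struct obj, rcu);

		kfree(o->payload);	/* no RCU reader can reach it now */
		kfree(o);
	}

	static void obj_release(struct obj *o)
	{
		/* readers under rcu_read_lock() may still hold pointers */
		call_rcu(&o->rcu, obj_free_rcu);
	}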
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 108df2e..81be3ba 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -396,6 +396,7 @@
 	s->s_blocksize_bits = 10;
 	s->s_magic = DEVPTS_SUPER_MAGIC;
 	s->s_op = &devpts_sops;
+	s->s_d_op = &simple_dentry_operations;
 	s->s_time_gran = 1;
 
 	error = -ENOMEM;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 6cb042b..6fcb29b 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -724,7 +724,8 @@
 {
 	loff_t res = EXT2_NDIR_BLOCKS;
 	int meta_blocks;
-	loff_t upper_limit;
+	unsigned int upper_limit;
+	unsigned int ppb = 1 << (bits-2);
 
 	/* This is calculated to be the largest file size for a
 	 * dense file, such that the total number of
@@ -738,24 +739,34 @@
 	/* total blocks in file system block size */
 	upper_limit >>= (bits - 9);
 
-
-	/* indirect blocks */
-	meta_blocks = 1;
-	/* double indirect blocks */
-	meta_blocks += 1 + (1LL << (bits-2));
-	/* tripple indirect blocks */
-	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
-
-	upper_limit -= meta_blocks;
-	upper_limit <<= bits;
-
+	/* Compute how many blocks we can address by block tree */
 	res += 1LL << (bits-2);
 	res += 1LL << (2*(bits-2));
 	res += 1LL << (3*(bits-2));
-	res <<= bits;
-	if (res > upper_limit)
-		res = upper_limit;
+	/* Does block tree limit file size? */
+	if (res < upper_limit)
+		goto check_lfs;
 
+	res = upper_limit;
+	/* How many metadata blocks are needed for addressing upper_limit? */
+	upper_limit -= EXT2_NDIR_BLOCKS;
+	/* indirect blocks */
+	meta_blocks = 1;
+	upper_limit -= ppb;
+	/* double indirect blocks */
+	if (upper_limit < ppb * ppb) {
+		meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb);
+		res -= meta_blocks;
+		goto check_lfs;
+	}
+	meta_blocks += 1 + ppb;
+	upper_limit -= ppb * ppb;
+	/* triple indirect blocks for the rest */
+	meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb) +
+		DIV_ROUND_UP(upper_limit, ppb*ppb);
+	res -= meta_blocks;
+check_lfs:
+	res <<= bits;
 	if (res > MAX_LFS_FILESIZE)
 		res = MAX_LFS_FILESIZE;
 
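
The rewritten ext2_max_size() above stops subtracting a worst-case
metadata count up front; it first decides whether the indirect-block
tree or the 32-bit i_blocks sector count is the binding limit, and only
charges the metadata blocks actually needed to address the reachable
range. A standalone userspace model of the same arithmetic (types
widened to uint64_t for clarity; the kernel afterwards shifts to bytes
and caps the result at MAX_LFS_FILESIZE):

    #include <stdint.h>
    #include <stdio.h>

    #define NDIR_BLOCKS 12
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* 'bits' is the block-size shift (10 for 1 KiB ... 12 for 4 KiB). */
    static uint64_t max_file_blocks(unsigned int bits)
    {
        uint64_t upper_limit = (1ULL << 32) - 1;   /* i_blocks sectors  */
        uint64_t ppb = 1ULL << (bits - 2);         /* pointers per block */
        uint64_t res = NDIR_BLOCKS, meta;

        upper_limit >>= (bits - 9);                /* sectors -> blocks */

        /* Blocks addressable through the direct/indirect tree. */
        res += ppb + ppb * ppb + ppb * ppb * ppb;
        if (res < upper_limit)
            return res;                     /* the tree is the limit */

        /* Otherwise charge only metadata needed up to upper_limit. */
        res = upper_limit;
        upper_limit -= NDIR_BLOCKS;
        meta = 1;                                  /* single indirect */
        upper_limit -= ppb;
        if (upper_limit < ppb * ppb)               /* double indirect */
            return res - (meta + 1 + DIV_ROUND_UP(upper_limit, ppb));
        meta += 1 + ppb;
        upper_limit -= ppb * ppb;                  /* triple indirect */
        meta += 1 + DIV_ROUND_UP(upper_limit, ppb) +
                DIV_ROUND_UP(upper_limit, ppb * ppb);
        return res - meta;
    }

    int main(void)
    {
        printf("4 KiB blocks: %llu data blocks max\n",
               (unsigned long long)max_file_blocks(12));
        return 0;
    }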
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e9232a0..6eea530 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -103,10 +103,9 @@
 	depends on EXT4_FS
 	select FS_ENCRYPTION
 	help
-	  Enable encryption of ext4 files and directories.  This
-	  feature is similar to ecryptfs, but it is more memory
-	  efficient since it avoids caching the encrypted and
-	  decrypted pages in the page cache.
+	  This kconfig symbol is deprecated; now it just selects
+	  FS_ENCRYPTION.  Use CONFIG_FS_ENCRYPTION=y in new config
+	  files.
 
 config EXT4_FS_ENCRYPTION
 	bool "Ext4 FS Encryption"
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index e16bc4c..4ffc5ed 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -109,7 +109,7 @@
 	int dir_has_error = 0;
 	struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
 
-	if (ext4_encrypted_inode(inode)) {
+	if (IS_ENCRYPTED(inode)) {
 		err = fscrypt_get_encryption_info(inode);
 		if (err && err != -ENOKEY)
 			return err;
@@ -136,7 +136,7 @@
 			return err;
 	}
 
-	if (ext4_encrypted_inode(inode)) {
+	if (IS_ENCRYPTED(inode)) {
 		err = fscrypt_fname_alloc_buffer(inode, EXT4_NAME_LEN, &fstr);
 		if (err < 0)
 			return err;
@@ -243,7 +243,7 @@
 			offset += ext4_rec_len_from_disk(de->rec_len,
 					sb->s_blocksize);
 			if (le32_to_cpu(de->inode)) {
-				if (!ext4_encrypted_inode(inode)) {
+				if (!IS_ENCRYPTED(inode)) {
 					if (!dir_emit(ctx, de->name,
 					    de->name_len,
 					    le32_to_cpu(de->inode),
@@ -281,9 +281,7 @@
 done:
 	err = 0;
 errout:
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
 	fscrypt_fname_free_buffer(&fstr);
-#endif
 	brelse(bh);
 	return err;
 }
@@ -609,7 +607,7 @@
 
 static int ext4_dir_open(struct inode * inode, struct file * filp)
 {
-	if (ext4_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
 	return 0;
 }
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 7af8b5e..80ad976 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -38,7 +38,6 @@
 #include <linux/compat.h>
 #endif
 
-#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION)
 #include <linux/fscrypt.h>
 
 /*
@@ -1337,7 +1336,7 @@
 #define EXT4_MF_FS_ABORTED		0x0002	/* Fatal error detected */
 #define EXT4_MF_TEST_DUMMY_ENCRYPTION	0x0004
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 #define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \
 						EXT4_MF_TEST_DUMMY_ENCRYPTION))
 #else
@@ -2054,7 +2053,7 @@
 	const struct qstr *usr_fname;
 	struct fscrypt_str disk_name;
 	struct dx_hash_info hinfo;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	struct fscrypt_str crypto_buf;
 #endif
 };
@@ -2266,12 +2265,7 @@
 					      struct ext4_group_desc *gdp);
 ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
 
-static inline bool ext4_encrypted_inode(struct inode *inode)
-{
-	return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
-}
-
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 static inline int ext4_fname_setup_filename(struct inode *dir,
 			const struct qstr *iname,
 			int lookup, struct ext4_filename *fname)
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index f976111..caa9315 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -391,7 +391,7 @@
 {
 	struct ext4_inode_info *ei = EXT4_I(inode);
 
-	if (ext4_handle_valid(handle)) {
+	if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) {
 		ei->i_sync_tid = handle->h_transaction->t_tid;
 		if (datasync)
 			ei->i_datasync_tid = handle->h_transaction->t_tid;
@@ -418,7 +418,7 @@
 	    (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
 	    !test_opt(inode->i_sb, DELALLOC))) {
 		/* We do not support data journalling for encrypted data */
-		if (S_ISREG(inode->i_mode) && ext4_encrypted_inode(inode))
+		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
 			return EXT4_INODE_ORDERED_DATA_MODE;  /* ordered */
 		return EXT4_INODE_JOURNAL_DATA_MODE;	/* journal data */
 	}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 054c385..2080bee 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1047,6 +1047,7 @@
 	__le32 border;
 	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
 	int err = 0;
+	size_t ext_size = 0;
 
 	/* make decision: where to split? */
 	/* FIXME: now decision is simplest: at current extent */
@@ -1138,6 +1139,10 @@
 		le16_add_cpu(&neh->eh_entries, m);
 	}
 
+	/* zero out unused area in the extent block */
+	ext_size = sizeof(struct ext4_extent_header) +
+		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
+	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
 	ext4_extent_block_csum_set(inode, neh);
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
@@ -1217,6 +1222,11 @@
 				sizeof(struct ext4_extent_idx) * m);
 			le16_add_cpu(&neh->eh_entries, m);
 		}
+		/* zero out unused area in the extent block */
+		ext_size = sizeof(struct ext4_extent_header) +
+		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
+		memset(bh->b_data + ext_size, 0,
+			inode->i_sb->s_blocksize - ext_size);
 		ext4_extent_block_csum_set(inode, neh);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
@@ -1282,6 +1292,7 @@
 	ext4_fsblk_t newblock, goal = 0;
 	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
 	int err = 0;
+	size_t ext_size = 0;
 
 	/* Try to prepend new index to old one */
 	if (ext_depth(inode))
@@ -1307,9 +1318,11 @@
 		goto out;
 	}
 
+	ext_size = sizeof(EXT4_I(inode)->i_data);
 	/* move top-level index/leaf into new block */
-	memmove(bh->b_data, EXT4_I(inode)->i_data,
-		sizeof(EXT4_I(inode)->i_data));
+	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
+	/* zero out unused area in the extent block */
+	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
 
 	/* set size of new block */
 	neh = ext_block_hdr(bh);
@@ -3582,7 +3595,7 @@
 		max_zeroout = sbi->s_extent_max_zeroout_kb >>
 			(inode->i_sb->s_blocksize_bits - 10);
 
-	if (ext4_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		max_zeroout = 0;
 
 	/*
@@ -4938,7 +4951,7 @@
 	 * leave it disabled for encrypted inodes for now.  This is a
 	 * bug we should fix....
 	 */
-	if (ext4_encrypted_inode(inode) &&
+	if (IS_ENCRYPTED(inode) &&
 	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
 		     FALLOC_FL_ZERO_RANGE)))
 		return -EOPNOTSUPP;
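
All three extents.c hunks above close the same hole: freshly allocated
extent-tree blocks were written to disk with uninitialized bytes past
the last valid entry, leaking stale memory contents. The fix is a
memset of the unused tail after the header and live entries. A minimal
userspace model of the arithmetic (struct layouts simplified, not the
on-disk ext4 format):

    #include <stdint.h>
    #include <string.h>

    struct ext_header { uint16_t entries; };
    struct ext_entry  { uint32_t block; uint16_t len; uint16_t hi; };

    /* Zero everything past the header and the valid entries so stale
     * heap/disk bytes never reach storage. */
    static void zero_extent_tail(void *block, size_t blocksize,
                                 const struct ext_header *hdr)
    {
        size_t used = sizeof(struct ext_header) +
                      sizeof(struct ext_entry) * hdr->entries;

        memset((char *)block + used, 0, blocksize - used);
    }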
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 08fca4a..3badb49d 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -79,7 +79,7 @@
 	struct super_block *sb = inode->i_sb;
 	int blockmask = sb->s_blocksize - 1;
 
-	if (pos >= i_size_read(inode))
+	if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
 		return 0;
 
 	if ((pos | iov_iter_alignment(from)) & blockmask)
@@ -163,6 +163,13 @@
 	}
 
 	ret = __generic_file_write_iter(iocb, from);
+	/*
+	 * Unaligned direct AIO must be the only IO in flight. Otherwise
+	 * overlapping aligned IO after unaligned might result in data
+	 * corruption.
+	 */
+	if (ret == -EIOCBQUEUED && unaligned_aio)
+		ext4_unwritten_wait(inode);
 	inode_unlock(inode);
 
 	if (ret > 0)
@@ -294,15 +301,6 @@
 
 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	struct inode *inode = file->f_mapping->host;
-
-	if (ext4_encrypted_inode(inode)) {
-		int err = fscrypt_get_encryption_info(inode);
-		if (err)
-			return 0;
-		if (!fscrypt_has_encryption_key(inode))
-			return -ENOKEY;
-	}
 	file_accessed(file);
 	if (IS_DAX(file_inode(file))) {
 		vma->vm_ops = &ext4_dax_vm_ops;
@@ -318,7 +316,6 @@
 	struct super_block *sb = inode->i_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	struct vfsmount *mnt = filp->f_path.mnt;
-	struct dentry *dir;
 	struct path path;
 	char buf[64], *cp;
 	int ret;
@@ -355,25 +352,11 @@
 			ext4_journal_stop(handle);
 		}
 	}
-	if (ext4_encrypted_inode(inode)) {
-		ret = fscrypt_get_encryption_info(inode);
-		if (ret)
-			return -EACCES;
-		if (!fscrypt_has_encryption_key(inode))
-			return -ENOKEY;
-	}
 
-	dir = dget_parent(file_dentry(filp));
-	if (ext4_encrypted_inode(d_inode(dir)) &&
-			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
-		ext4_warning(inode->i_sb,
-			     "Inconsistent encryption contexts: %lu/%lu",
-			     (unsigned long) d_inode(dir)->i_ino,
-			     (unsigned long) inode->i_ino);
-		dput(dir);
-		return -EPERM;
-	}
-	dput(dir);
+	ret = fscrypt_file_open(inode, filp);
+	if (ret)
+		return ret;
+
 	/*
 	 * Set up the jbd2_inode if we are opening the inode for
 	 * writing and the journal is present
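
The file.c hunks above are a straight conversion to the fscrypt helper:
fscrypt_file_open() bundles the load-the-key (-ENOKEY if absent) and
verify-the-parent-directory-context (-EPERM on mismatch) sequence that
ext4 previously open-coded, and the mmap-time key check is dropped
because a file cannot be mmapped without first passing open(). In a
filesystem's ->open() the call site looks roughly like this (sketch;
the error codes come from the helper itself):

    static int example_file_open(struct inode *inode, struct file *filp)
    {
        int err;

        /* No-op for unencrypted inodes; otherwise sets up the key and
         * checks the parent directory's encryption context. */
        err = fscrypt_file_open(inode, filp);
        if (err)
            return err;

        return generic_file_open(inode, filp);
    }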
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 004c088..e37e5f1 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -742,9 +742,9 @@
 	if (!dir || !dir->i_nlink)
 		return ERR_PTR(-EPERM);
 
-	if ((ext4_encrypted_inode(dir) ||
+	if ((IS_ENCRYPTED(dir) ||
 	     DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) &&
-	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
+	     (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
 		err = fscrypt_get_encryption_info(dir);
 		if (err)
 			return ERR_PTR(err);
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 58229c1..d2844fe 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1217,6 +1217,7 @@
 	ext4_lblk_t offsets[4], offsets2[4];
 	Indirect chain[4], chain2[4];
 	Indirect *partial, *partial2;
+	Indirect *p = NULL, *p2 = NULL;
 	ext4_lblk_t max_block;
 	__le32 nr = 0, nr2 = 0;
 	int n = 0, n2 = 0;
@@ -1258,7 +1259,7 @@
 		}
 
 
-		partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+		partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
 		if (nr) {
 			if (partial == chain) {
 				/* Shared branch grows from the inode */
@@ -1283,13 +1284,11 @@
 				partial->p + 1,
 				(__le32 *)partial->bh->b_data+addr_per_block,
 				(chain+n-1) - partial);
-			BUFFER_TRACE(partial->bh, "call brelse");
-			brelse(partial->bh);
 			partial--;
 		}
 
 end_range:
-		partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+		partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
 		if (nr2) {
 			if (partial2 == chain2) {
 				/*
@@ -1319,16 +1318,14 @@
 					   (__le32 *)partial2->bh->b_data,
 					   partial2->p,
 					   (chain2+n2-1) - partial2);
-			BUFFER_TRACE(partial2->bh, "call brelse");
-			brelse(partial2->bh);
 			partial2--;
 		}
 		goto do_indirects;
 	}
 
 	/* Punch happened within the same level (n == n2) */
-	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
-	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+	partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
+	partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
 
 	/* Free top, but only if partial2 isn't its subtree. */
 	if (nr) {
@@ -1385,11 +1382,7 @@
 					   partial->p + 1,
 					   partial2->p,
 					   (chain+n-1) - partial);
-			BUFFER_TRACE(partial->bh, "call brelse");
-			brelse(partial->bh);
-			BUFFER_TRACE(partial2->bh, "call brelse");
-			brelse(partial2->bh);
-			return 0;
+			goto cleanup;
 		}
 
 		/*
@@ -1404,8 +1397,6 @@
 					   partial->p + 1,
 					   (__le32 *)partial->bh->b_data+addr_per_block,
 					   (chain+n-1) - partial);
-			BUFFER_TRACE(partial->bh, "call brelse");
-			brelse(partial->bh);
 			partial--;
 		}
 		if (partial2 > chain2 && depth2 <= depth) {
@@ -1413,11 +1404,21 @@
 					   (__le32 *)partial2->bh->b_data,
 					   partial2->p,
 					   (chain2+n2-1) - partial2);
-			BUFFER_TRACE(partial2->bh, "call brelse");
-			brelse(partial2->bh);
 			partial2--;
 		}
 	}
+
+cleanup:
+	while (p && p > chain) {
+		BUFFER_TRACE(p->bh, "call brelse");
+		brelse(p->bh);
+		p--;
+	}
+	while (p2 && p2 > chain2) {
+		BUFFER_TRACE(p2->bh, "call brelse");
+		brelse(p2->bh);
+		p2--;
+	}
 	return 0;
 
 do_indirects:
@@ -1425,7 +1426,7 @@
 	switch (offsets[0]) {
 	default:
 		if (++n >= n2)
-			return 0;
+			break;
 		nr = i_data[EXT4_IND_BLOCK];
 		if (nr) {
 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
@@ -1433,7 +1434,7 @@
 		}
 	case EXT4_IND_BLOCK:
 		if (++n >= n2)
-			return 0;
+			break;
 		nr = i_data[EXT4_DIND_BLOCK];
 		if (nr) {
 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
@@ -1441,7 +1442,7 @@
 		}
 	case EXT4_DIND_BLOCK:
 		if (++n >= n2)
-			return 0;
+			break;
 		nr = i_data[EXT4_TIND_BLOCK];
 		if (nr) {
 			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
@@ -1450,5 +1451,5 @@
 	case EXT4_TIND_BLOCK:
 		;
 	}
-	return 0;
+	goto cleanup;
 }
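
The punch-hole rework above records the heads of both
ext4_find_shared() chains (p, p2) and funnels every exit path,
including the do_indirects switch, through one cleanup: label, so each
pinned buffer head is released exactly once instead of being leaked on
early returns. The unwind itself is just this loop (stand-in types;
release() plays the role of brelse()):

    struct node { void *bh; };

    extern void release(void *bh);

    /* Walk from the deepest pinned element back toward (but not
     * including) the chain base, dropping one reference per node. */
    static void release_chain(struct node *p, struct node *base)
    {
        while (p && p > base) {
            release(p->bh);
            p--;
        }
    }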
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index faba369..b5e1837 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -392,7 +392,7 @@
 {
 	int ret;
 
-	if (ext4_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		return fscrypt_zeroout_range(inode, lblk, pblk, len);
 
 	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
@@ -1084,7 +1084,7 @@
 	return ret;
 }
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 				  get_block_t *get_block)
 {
@@ -1150,7 +1150,7 @@
 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
-			decrypt = ext4_encrypted_inode(inode) &&
+			decrypt = IS_ENCRYPTED(inode) &&
 				S_ISREG(inode->i_mode) &&
 				!fscrypt_using_hardware_encryption(inode);
 			ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0),
@@ -1247,7 +1247,7 @@
 	/* In case writeback began while the page was unlocked */
 	wait_for_stable_page(page);
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	if (ext4_should_dioread_nolock(inode))
 		ret = ext4_block_write_begin(page, pos, len,
 					     ext4_get_block_unwritten);
@@ -2989,7 +2989,7 @@
 	/* In case writeback began while the page was unlocked */
 	wait_for_stable_page(page);
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	ret = ext4_block_write_begin(page, pos, len,
 				     ext4_da_get_block_prep);
 #else
@@ -3502,7 +3502,7 @@
 		dio_flags = DIO_LOCKING;
 	}
 #if defined(CONFIG_EXT4_FS_ENCRYPTION)
-	WARN_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+	WARN_ON(IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)
 		&& !fscrypt_using_hardware_encryption(inode));
 #endif
 	if (IS_DAX(inode)) {
@@ -3624,9 +3624,8 @@
 	ssize_t ret;
 	int rw = iov_iter_rw(iter);
 
-#if defined(CONFIG_EXT4_FS_ENCRYPTION)
-	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
-		&& !fscrypt_using_hardware_encryption(inode))
+#ifdef CONFIG_FS_ENCRYPTION
+	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
 		return 0;
 #endif
 
@@ -3818,7 +3817,7 @@
 	if (!buffer_uptodate(bh)) {
 		err = -EIO;
 		decrypt = S_ISREG(inode->i_mode) &&
-			ext4_encrypted_inode(inode) &&
+			IS_ENCRYPTED(inode) &&
 			!fscrypt_using_hardware_encryption(inode);
 		ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), 1, &bh);
 		wait_on_buffer(bh);
@@ -3899,7 +3898,7 @@
 	struct inode *inode = mapping->host;
 
 	/* If we are processing an encrypted inode during orphan list handling */
-	if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode))
+	if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
 		return 0;
 
 	blocksize = inode->i_sb->s_blocksize;
@@ -4639,6 +4638,7 @@
 		 * not initialized on a new filesystem. */
 	}
 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+	ext4_set_inode_flags(inode);
 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
 	if (ext4_has_feature_64bit(sb))
@@ -4746,7 +4746,7 @@
 		inode->i_op = &ext4_dir_inode_operations;
 		inode->i_fop = &ext4_dir_operations;
 	} else if (S_ISLNK(inode->i_mode)) {
-		if (ext4_encrypted_inode(inode)) {
+		if (IS_ENCRYPTED(inode)) {
 			inode->i_op = &ext4_encrypted_symlink_inode_operations;
 			ext4_set_aops(inode);
 		} else if (ext4_inode_is_fast_symlink(inode)) {
@@ -4776,7 +4776,7 @@
 		goto bad_inode;
 	}
 	brelse(iloc.bh);
-	ext4_set_inode_flags(inode);
+
 	unlock_new_inode(inode);
 	return inode;
 
@@ -5198,6 +5198,10 @@
 	if (error)
 		return error;
 
+	error = fscrypt_prepare_setattr(dentry, attr);
+	if (error)
+		return error;
+
 	if (is_quota_modification(inode, attr)) {
 		error = dquot_initialize(inode);
 		if (error)
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index e70500d..eb72cc8 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -191,7 +191,7 @@
 	return err;
 }
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 static int uuid_is_zero(__u8 u[16])
 {
 	int	i;
@@ -729,7 +729,7 @@
 		if (err == 0)
 			err = err2;
 		mnt_drop_write_file(filp);
-		if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
+		if (!err && (o_group < EXT4_SB(sb)->s_groups_count) &&
 		    ext4_has_group_desc_csum(sb) &&
 		    test_opt(sb, INIT_INODE_TABLE))
 			err = ext4_register_li_request(sb, o_group);
@@ -756,6 +756,13 @@
 		if ((flags & BLKDEV_DISCARD_SECURE) && !blk_queue_secure_erase(q))
 			return -EOPNOTSUPP;
 
+		/*
+		 * We haven't replayed the journal, so we cannot use our
+		 * block-bitmap-guided storage zapping commands.
+		 */
+		if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
+			return -EROFS;
+
 		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
 		    sizeof(range)))
 			return -EFAULT;
@@ -781,7 +788,7 @@
 		return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
 
 	case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 		int err, err2;
 		struct ext4_sb_info *sbi = EXT4_SB(sb);
 		handle_t *handle;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index c4434bd..8a41a77 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -602,8 +602,7 @@
 		return -EOPNOTSUPP;
 	}
 
-	if (ext4_encrypted_inode(orig_inode) ||
-	    ext4_encrypted_inode(donor_inode)) {
+	if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
 		ext4_msg(orig_inode->i_sb, KERN_ERR,
 			 "Online defrag not supported for encrypted files");
 		return -EOPNOTSUPP;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index b7b58b1..6817a89 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -610,7 +610,7 @@
 		{
 			if (show_names)
 			{
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 				int len;
 				char *name;
 				struct fscrypt_str fname_crypto_str =
@@ -619,7 +619,7 @@
 
 				name  = de->name;
 				len = de->name_len;
-				if (ext4_encrypted_inode(dir))
+				if (IS_ENCRYPTED(dir))
 					res = fscrypt_get_encryption_info(dir);
 				if (res) {
 					printk(KERN_WARNING "Error setting up"
@@ -968,9 +968,9 @@
 	top = (struct ext4_dir_entry_2 *) ((char *) de +
 					   dir->i_sb->s_blocksize -
 					   EXT4_DIR_REC_LEN(0));
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	/* Check if the directory is encrypted */
-	if (ext4_encrypted_inode(dir)) {
+	if (IS_ENCRYPTED(dir)) {
 		err = fscrypt_get_encryption_info(dir);
 		if (err < 0) {
 			brelse(bh);
@@ -999,7 +999,7 @@
 			continue;
 		if (de->inode == 0)
 			continue;
-		if (!ext4_encrypted_inode(dir)) {
+		if (!IS_ENCRYPTED(dir)) {
 			tmp_str.name = de->name;
 			tmp_str.len = de->name_len;
 			err = ext4_htree_store_dirent(dir_file,
@@ -1031,7 +1031,7 @@
 	}
 errout:
 	brelse(bh);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	fscrypt_fname_free_buffer(&fname_crypto_str);
 #endif
 	return count;
@@ -1252,7 +1252,7 @@
 
 	f.usr_fname = fname->usr_fname;
 	f.disk_name = fname->disk_name;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	f.crypto_buf = fname->crypto_buf;
 #endif
 	return fscrypt_match_name(&f, de->name, de->name_len);
@@ -1499,7 +1499,7 @@
 	ext4_lblk_t block;
 	int retval;
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	*res_dir = NULL;
 #endif
 	frame = dx_probe(fname, dir, NULL, frames);
@@ -1579,7 +1579,7 @@
 					 ino);
 			return ERR_PTR(-EFSCORRUPTED);
 		}
-		if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
+		if (!IS_ERR(inode) && IS_ENCRYPTED(dir) &&
 		    (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
 		    !fscrypt_has_permitted_context(dir, inode)) {
 			ext4_warning(inode->i_sb,
@@ -3148,9 +3148,10 @@
 
 	if (inode->i_nlink >= EXT4_LINK_MAX)
 		return -EMLINK;
-	if (ext4_encrypted_inode(dir) &&
-			!fscrypt_has_permitted_context(dir, inode))
-		return -EPERM;
+
+	err = fscrypt_prepare_link(old_dentry, dir, dentry);
+	if (err)
+		return err;
 
        if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) &&
 	   (!projid_eq(EXT4_I(dir)->i_projid,
@@ -3448,12 +3449,6 @@
 			EXT4_I(old_dentry->d_inode)->i_projid)))
 		return -EXDEV;
 
-	if ((ext4_encrypted_inode(old_dir) &&
-	     !fscrypt_has_encryption_key(old_dir)) ||
-	    (ext4_encrypted_inode(new_dir) &&
-	     !fscrypt_has_encryption_key(new_dir)))
-		return -ENOKEY;
-
 	retval = dquot_initialize(old.dir);
 	if (retval)
 		return retval;
@@ -3482,13 +3477,6 @@
 	if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
 		goto end_rename;
 
-	if ((old.dir != new.dir) &&
-	    ext4_encrypted_inode(new.dir) &&
-	    !fscrypt_has_permitted_context(new.dir, old.inode)) {
-		retval = -EPERM;
-		goto end_rename;
-	}
-
 	new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
 				 &new.de, &new.inlined);
 	if (IS_ERR(new.bh)) {
@@ -3653,19 +3641,6 @@
 	u8 new_file_type;
 	int retval;
 
-	if ((ext4_encrypted_inode(old_dir) &&
-	     !fscrypt_has_encryption_key(old_dir)) ||
-	    (ext4_encrypted_inode(new_dir) &&
-	     !fscrypt_has_encryption_key(new_dir)))
-		return -ENOKEY;
-
-	if ((ext4_encrypted_inode(old_dir) ||
-	     ext4_encrypted_inode(new_dir)) &&
-	    (old_dir != new_dir) &&
-	    (!fscrypt_has_permitted_context(new_dir, old.inode) ||
-	     !fscrypt_has_permitted_context(old_dir, new.inode)))
-		return -EPERM;
-
 	if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT) &&
 	     !projid_eq(EXT4_I(new_dir)->i_projid,
 			EXT4_I(old_dentry->d_inode)->i_projid)) ||
@@ -3791,9 +3766,16 @@
 			struct inode *new_dir, struct dentry *new_dentry,
 			unsigned int flags)
 {
+	int err;
+
 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
 		return -EINVAL;
 
+	err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
+				     flags);
+	if (err)
+		return err;
+
 	if (flags & RENAME_EXCHANGE) {
 		return ext4_cross_rename(old_dir, old_dentry,
 					 new_dir, new_dentry);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 228baa5..55bda9f 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -66,7 +66,7 @@
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 		struct page *data_page = NULL;
 #endif
 		struct buffer_head *bh, *head;
@@ -78,7 +78,7 @@
 		if (!page)
 			continue;
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 		if (!page->mapping) {
 			/* The bounce data pages are unmapped. */
 			data_page = page;
@@ -111,7 +111,7 @@
 		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
 		local_irq_restore(flags);
 		if (!under_io) {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 			if (data_page)
 				fscrypt_restore_control_page(data_page);
 #endif
@@ -468,8 +468,7 @@
 
 	bh = head = page_buffers(page);
 
-	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
-	    nr_to_submit) {
+	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) {
 		gfp_t gfp_flags = GFP_NOFS;
 
 	retry_encrypt:
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 270c4f4..8e38121 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -49,7 +49,7 @@
 
 static inline bool ext4_bio_encrypted(struct bio *bio)
 {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	return unlikely(bio->bi_private != NULL);
 #else
 	return false;
@@ -280,8 +280,7 @@
 		if (bio == NULL) {
 			struct fscrypt_ctx *ctx = NULL;
 
-			if (ext4_encrypted_inode(inode) &&
-			    S_ISREG(inode->i_mode)) {
+			if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
 				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 				if (IS_ERR(ctx))
 					goto set_error_page;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 58e6b8a..aef2a24 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -907,11 +907,18 @@
 	memcpy(n_group_desc, o_group_desc,
 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
 	n_group_desc[gdb_num] = gdb_bh;
+
+	BUFFER_TRACE(gdb_bh, "get_write_access");
+	err = ext4_journal_get_write_access(handle, gdb_bh);
+	if (err) {
+		kvfree(n_group_desc);
+		brelse(gdb_bh);
+		return err;
+	}
+
 	EXT4_SB(sb)->s_group_desc = n_group_desc;
 	EXT4_SB(sb)->s_gdb_count++;
 	kvfree(o_group_desc);
-	BUFFER_TRACE(gdb_bh, "get_write_access");
-	err = ext4_journal_get_write_access(handle, gdb_bh);
 	return err;
 }
 
@@ -1928,7 +1935,8 @@
 				le16_to_cpu(es->s_reserved_gdt_blocks);
 			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
 			n_blocks_count = (ext4_fsblk_t)n_group *
-				EXT4_BLOCKS_PER_GROUP(sb);
+				EXT4_BLOCKS_PER_GROUP(sb) +
+				le32_to_cpu(es->s_first_data_block);
 			n_group--; /* set to last group number */
 		}
 
@@ -2039,6 +2047,10 @@
 		free_flex_gd(flex_gd);
 	if (resize_inode != NULL)
 		iput(resize_inode);
-	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
+	if (err)
+		ext4_warning(sb, "error (%d) occurred during "
+			     "file system resize", err);
+	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
+		 ext4_blocks_count(es));
 	return err;
 }
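
The resize.c hunk above reorders the risky step: journal write access
to the new group-descriptor buffer is now obtained before the enlarged
s_group_desc array is published and s_gdb_count bumped, so a failure
can no longer leave the superblock pointing at a descriptor block the
journal never granted access to. The general shape is acquire first,
publish last (hedged sketch, stand-in types and helpers):

    struct buf;
    struct state { struct buf **group_desc; int gdb_count; };

    extern int  journal_get_write_access(struct buf *b);
    extern void put_buf(struct buf *b);
    extern void free_array(struct buf **a);

    static int add_gdb_publish_last(struct state *s, struct buf *gdb,
                                    struct buf **new_array)
    {
        int err = journal_get_write_access(gdb);    /* may fail */

        if (err) {
            free_array(new_array);    /* nothing was published yet */
            put_buf(gdb);
            return err;
        }
        /* Commit visible state only after every fallible step. */
        s->group_desc = new_array;
        s->gdb_count++;
        return 0;
    }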
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index dd851f9..a8f99b9 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1104,7 +1104,7 @@
 	return try_to_free_buffers(page);
 }
 
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
 {
 	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
@@ -1765,7 +1765,7 @@
 		*journal_ioprio =
 			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
 	} else if (token == Opt_test_dummy_encryption) {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
 		ext4_msg(sb, KERN_WARNING,
 			 "Test dummy encryption mode enabled");
@@ -3978,7 +3978,7 @@
 	sb->s_op = &ext4_sops;
 	sb->s_export_op = &ext4_export_ops;
 	sb->s_xattr = ext4_xattr_handlers;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	sb->s_cop = &ext4_cryptops;
 #endif
 #ifdef CONFIG_QUOTA
@@ -4040,7 +4040,7 @@
 				 "data=, fs mounted w/o journal");
 			goto failed_mount_wq;
 		}
-		sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM;
+		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
 		clear_opt(sb, JOURNAL_CHECKSUM);
 		clear_opt(sb, DATA_FLAGS);
 		sbi->s_journal = NULL;
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 54942d6..dc7b155 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -223,7 +223,7 @@
 EXT4_ATTR_FEATURE(lazy_itable_init);
 EXT4_ATTR_FEATURE(batched_discard);
 EXT4_ATTR_FEATURE(meta_bg_resize);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 EXT4_ATTR_FEATURE(encryption);
 #endif
 EXT4_ATTR_FEATURE(metadata_csum_seed);
@@ -232,7 +232,7 @@
 	ATTR_LIST(lazy_itable_init),
 	ATTR_LIST(batched_discard),
 	ATTR_LIST(meta_bg_resize),
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	ATTR_LIST(encryption),
 #endif
 	ATTR_LIST(metadata_csum_seed),
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 378c221..976b1a8 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -3,6 +3,7 @@
 	depends on BLOCK
 	select CRYPTO
 	select CRYPTO_CRC32
+	select F2FS_FS_XATTR if FS_ENCRYPTION
 	help
 	  F2FS is based on Log-structured File System (LFS), which supports
 	  versatile "flash-friendly" features. The design has been focused on
@@ -77,13 +78,11 @@
 config F2FS_FS_ENCRYPTION
 	bool "F2FS Encryption"
 	depends on F2FS_FS
-	depends on F2FS_FS_XATTR
 	select FS_ENCRYPTION
 	help
-	  Enable encryption of f2fs files and directories.  This
-	  feature is similar to ecryptfs, but it is more memory
-	  efficient since it avoids caching the encrypted and
-	  decrypted pages in the page cache.
+	  This kconfig symbol is deprecated; now it just selects
+	  FS_ENCRYPTION.  Use CONFIG_FS_ENCRYPTION=y in new config
+	  files.
 
 config F2FS_IO_TRACE
 	bool "F2FS IO tracer"
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index b32efb5..279adb4 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -285,7 +285,7 @@
 	/* assert(atomic_read(acl->a_refcount) == 1); */
 
 	FOREACH_ACL_ENTRY(pa, acl, pe) {
-		switch(pa->e_tag) {
+		switch (pa->e_tag) {
 		case ACL_USER_OBJ:
 			pa->e_perm &= (mode >> 6) | ~S_IRWXO;
 			mode &= (pa->e_perm << 6) | ~S_IRWXU;
@@ -326,7 +326,7 @@
 	}
 
 	*mode_p = (*mode_p & ~S_IRWXUGO) | mode;
-        return not_equiv;
+	return not_equiv;
 }
 
 static int f2fs_acl_create(struct inode *dir, umode_t *mode,
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index d27d3c4..d4c5310 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -66,7 +66,7 @@
 		.old_blkaddr = index,
 		.new_blkaddr = index,
 		.encrypted_page = NULL,
-		.is_meta = is_meta,
+		.is_por = !is_meta,
 	};
 	int err;
 
@@ -130,6 +130,30 @@
 	return __get_meta_page(sbi, index, false);
 }
 
+static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
+							int type)
+{
+	struct seg_entry *se;
+	unsigned int segno, offset;
+	bool exist;
+
+	if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ)
+		return true;
+
+	segno = GET_SEGNO(sbi, blkaddr);
+	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
+	se = get_seg_entry(sbi, segno);
+
+	exist = f2fs_test_bit(offset, se->cur_valid_map);
+	if (!exist && type == DATA_GENERIC_ENHANCE) {
+		f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
+			"blkaddr:%u, sit bitmap:%d", blkaddr, exist);
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		WARN_ON(1);
+	}
+	return exist;
+}
+
 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
 					block_t blkaddr, int type)
 {
@@ -151,15 +175,22 @@
 			return false;
 		break;
 	case META_POR:
-	case DATA_GENERIC:
 		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
-			blkaddr < MAIN_BLKADDR(sbi))) {
-			if (type == DATA_GENERIC) {
-				f2fs_msg(sbi->sb, KERN_WARNING,
-					"access invalid blkaddr:%u", blkaddr);
-				WARN_ON(1);
-			}
+			blkaddr < MAIN_BLKADDR(sbi)))
 			return false;
+		break;
+	case DATA_GENERIC:
+	case DATA_GENERIC_ENHANCE:
+	case DATA_GENERIC_ENHANCE_READ:
+		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
+				blkaddr < MAIN_BLKADDR(sbi))) {
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"access invalid blkaddr:%u", blkaddr);
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			WARN_ON(1);
+			return false;
+		} else {
+			return __is_bitmap_valid(sbi, blkaddr, type);
 		}
 		break;
 	case META_GENERIC:
@@ -189,7 +220,7 @@
 		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
 		.encrypted_page = NULL,
 		.in_list = false,
-		.is_meta = (type != META_POR),
+		.is_por = (type == META_POR),
 	};
 	struct blk_plug plug;
 
@@ -306,8 +337,9 @@
 		goto skip_write;
 
 	/* collect a number of dirty meta pages and write together */
-	if (wbc->for_kupdate ||
-		get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
+	if (wbc->sync_mode != WB_SYNC_ALL &&
+			get_pages(sbi, F2FS_DIRTY_META) <
+					nr_pages_to_skip(sbi, META))
 		goto skip_write;
 
 	/* if locked failed, cp will flush dirty pages instead */
@@ -405,7 +437,7 @@
 	if (!PageDirty(page)) {
 		__set_page_dirty_nobuffers(page);
 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
-		SetPagePrivate(page);
+		f2fs_set_page_private(page, 0);
 		f2fs_trace_pid(page);
 		return 1;
 	}
@@ -643,6 +675,12 @@
 	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
 		return 0;
 
+	if (bdev_read_only(sbi->sb->s_bdev)) {
+		f2fs_msg(sbi->sb, KERN_INFO, "write access "
+			"unavailable, skipping orphan cleanup");
+		return 0;
+	}
+
 	if (s_flags & MS_RDONLY) {
 		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
 		sbi->sb->s_flags &= ~MS_RDONLY;
@@ -757,13 +795,27 @@
 	}
 }
 
+static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi,
+						struct f2fs_checkpoint *ckpt)
+{
+	unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset);
+	__u32 chksum;
+
+	chksum = f2fs_crc32(sbi, ckpt, chksum_ofs);
+	if (chksum_ofs < CP_CHKSUM_OFFSET) {
+		chksum_ofs += sizeof(chksum);
+		chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs,
+						F2FS_BLKSIZE - chksum_ofs);
+	}
+	return chksum;
+}
+
 static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
 		struct f2fs_checkpoint **cp_block, struct page **cp_page,
 		unsigned long long *version)
 {
-	unsigned long blk_size = sbi->blocksize;
 	size_t crc_offset = 0;
-	__u32 crc = 0;
+	__u32 crc;
 
 	*cp_page = f2fs_get_meta_page(sbi, cp_addr);
 	if (IS_ERR(*cp_page))
@@ -772,15 +824,27 @@
 	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
 
 	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
-	if (crc_offset > (blk_size - sizeof(__le32))) {
+	if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
+			crc_offset > CP_CHKSUM_OFFSET) {
 		f2fs_put_page(*cp_page, 1);
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"invalid crc_offset: %zu", crc_offset);
 		return -EINVAL;
 	}
 
-	crc = cur_cp_crc(*cp_block);
-	if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+	if (__is_set_ckpt_flags(*cp_block, CP_LARGE_NAT_BITMAP_FLAG)) {
+		if (crc_offset != CP_MIN_CHKSUM_OFFSET) {
+			f2fs_put_page(*cp_page, 1);
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"layout of large_nat_bitmap is deprecated, "
+				"run fsck to repair, chksum_offset: %zu",
+				crc_offset);
+			return -EINVAL;
+		}
+	}
+
+	crc = f2fs_checkpoint_chksum(sbi, *cp_block);
+	if (crc != cur_cp_crc(*cp_block)) {
 		f2fs_put_page(*cp_page, 1);
 		f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
 		return -EINVAL;
@@ -956,7 +1020,7 @@
 	inode_inc_dirty_pages(inode);
 	spin_unlock(&sbi->inode_lock[type]);
 
-	SetPagePrivate(page);
+	f2fs_set_page_private(page, 0);
 	f2fs_trace_pid(page);
 }
 
@@ -1008,13 +1072,11 @@
 	if (inode) {
 		unsigned long cur_ino = inode->i_ino;
 
-		if (is_dir)
-			F2FS_I(inode)->cp_task = current;
+		F2FS_I(inode)->cp_task = current;
 
 		filemap_fdatawrite(inode->i_mapping);
 
-		if (is_dir)
-			F2FS_I(inode)->cp_task = NULL;
+		F2FS_I(inode)->cp_task = NULL;
 
 		iput(inode);
 		/* We need to give cpu to another writers. */
@@ -1259,10 +1321,17 @@
 	else
 		__clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);
 
+	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK))
+		__set_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
+	else
+		__clear_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
+
 	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
 		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
-	else
-		__clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+	/*
+	 * TODO: we count on fsck.f2fs to clear this flag until we figure out
+	 * missing cases which clear it incorrectly.
+	 */
 
 	if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
 		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
@@ -1383,7 +1452,7 @@
 	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
 	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
 
-	crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
+	crc32 = f2fs_checkpoint_chksum(sbi, ckpt);
 	*((__le32 *)((unsigned char *)ckpt +
 				le32_to_cpu(ckpt->checksum_offset)))
 				= cpu_to_le32(crc32);
@@ -1467,7 +1536,11 @@
 	clear_sbi_flag(sbi, SBI_IS_DIRTY);
 	clear_sbi_flag(sbi, SBI_NEED_CP);
 	clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
+
+	spin_lock(&sbi->stat_lock);
 	sbi->unusable_block_count = 0;
+	spin_unlock(&sbi->stat_lock);
+
 	__set_cp_next_pack(sbi);
 
 	/*
@@ -1492,6 +1565,9 @@
 	unsigned long long ckpt_ver;
 	int err = 0;
 
+	if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
+		return -EROFS;
+
 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
 		if (cpc->reason != CP_PAUSE)
 			return 0;
@@ -1508,10 +1584,6 @@
 		err = -EIO;
 		goto out;
 	}
-	if (f2fs_readonly(sbi->sb)) {
-		err = -EROFS;
-		goto out;
-	}
 
 	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
 
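
f2fs_checkpoint_chksum() above exists because the large_nat_bitmap
layout stores the checksum at CP_MIN_CHKSUM_OFFSET in the middle of the
checkpoint block rather than at the end: the CRC must cover the bytes
before the field and, when anything follows, continue over the bytes
after it, skipping the 4-byte checksum word itself. A userspace model
of hash-around-the-hole (toy mixing function standing in for the
kernel's crc32-based f2fs_crc32()/f2fs_chksum()):

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t crc_update(uint32_t crc, const uint8_t *p, size_t n)
    {
        while (n--)
            crc = (crc << 1) ^ (crc >> 31) ^ *p++;   /* toy mix */
        return crc;
    }

    /* Checksum 'blk' (blksize bytes), skipping the 4-byte checksum
     * field at 'ofs', which may sit mid-block or at the end. */
    static uint32_t chksum_skip_field(const uint8_t *blk, size_t blksize,
                                      size_t ofs)
    {
        uint32_t crc = crc_update(0, blk, ofs);        /* before */

        if (ofs + sizeof(uint32_t) < blksize)          /* after  */
            crc = crc_update(crc, blk + ofs + sizeof(uint32_t),
                             blksize - ofs - sizeof(uint32_t));
        return crc;
    }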
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 22e0c99..ae2e39f 100755
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -226,12 +226,14 @@
 	struct block_device *bdev = sbi->sb->s_bdev;
 	int i;
 
-	for (i = 0; i < sbi->s_ndevs; i++) {
-		if (FDEV(i).start_blk <= blk_addr &&
-					FDEV(i).end_blk >= blk_addr) {
-			blk_addr -= FDEV(i).start_blk;
-			bdev = FDEV(i).bdev;
-			break;
+	if (f2fs_is_multi_device(sbi)) {
+		for (i = 0; i < sbi->s_ndevs; i++) {
+			if (FDEV(i).start_blk <= blk_addr &&
+			    FDEV(i).end_blk >= blk_addr) {
+				blk_addr -= FDEV(i).start_blk;
+				bdev = FDEV(i).bdev;
+				break;
+			}
 		}
 	}
 	if (bio) {
@@ -245,6 +247,9 @@
 {
 	int i;
 
+	if (!f2fs_is_multi_device(sbi))
+		return 0;
+
 	for (i = 0; i < sbi->s_ndevs; i++)
 		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
 			return i;
@@ -306,9 +311,10 @@
 		for (; start < F2FS_IO_SIZE(sbi); start++) {
 			struct page *page =
 				mempool_alloc(sbi->write_io_dummy,
-					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
+					      GFP_NOIO | __GFP_NOFAIL);
 			f2fs_bug_on(sbi, !page);
 
+			zero_user_segment(page, 0, PAGE_SIZE);
 			SetPagePrivate(page);
 			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
 			lock_page(page);
@@ -452,7 +458,7 @@
 
 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
 {
-	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
+	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
 }
 
 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
@@ -481,7 +487,8 @@
 	struct inode *inode = fio->page->mapping->host;
 
 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
-			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+			fio->is_por ? META_POR : (__is_meta_io(fio) ?
+			META_GENERIC : DATA_GENERIC_ENHANCE)))
 		return -EFAULT;
 
 	trace_f2fs_submit_page_bio(page, fio);
@@ -541,9 +548,7 @@
 		spin_unlock(&io->io_lock);
 	}
 
-	if (__is_valid_data_blkaddr(fio->old_blkaddr))
-		verify_block_addr(fio, fio->old_blkaddr);
-	verify_block_addr(fio, fio->new_blkaddr);
+	verify_fio_blkaddr(fio);
 
 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
 	fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
@@ -614,16 +619,13 @@
 	struct bio_post_read_ctx *ctx;
 	unsigned int post_read_steps = 0;
 
-	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
-		return ERR_PTR(-EFAULT);
-
 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
 	f2fs_target_device(sbi, blkaddr, bio);
 	bio->bi_end_io = f2fs_read_end_io;
 	bio_set_op_attrs(bio, REQ_OP_READ,
-			 (f2fs_encrypted_inode(inode) ?
+			 (IS_ENCRYPTED(inode) ?
 			  REQ_NOENCRYPT :
 			  op_flag));
 
@@ -648,8 +650,10 @@
 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
 							block_t blkaddr)
 {
-	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct bio *bio;
 
+	bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
@@ -664,8 +668,8 @@
 		return -EFAULT;
 	}
 	ClearPageError(page);
-	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
-	__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
+	inc_page_count(sbi, F2FS_RD_DATA);
+	__f2fs_submit_read_bio(sbi, bio, DATA);
 	return 0;
 }
 
@@ -793,6 +797,11 @@
 
 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
 		dn.data_blkaddr = ei.blk + index - ei.fofs;
+		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
+						DATA_GENERIC_ENHANCE_READ)) {
+			err = -EFAULT;
+			goto put_err;
+		}
 		goto got_it;
 	}
 
@@ -806,6 +815,13 @@
 		err = -ENOENT;
 		goto put_err;
 	}
+	if (dn.data_blkaddr != NEW_ADDR &&
+			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
+						dn.data_blkaddr,
+						DATA_GENERIC_ENHANCE)) {
+		err = -EFAULT;
+		goto put_err;
+	}
 got_it:
 	if (PageUptodate(page)) {
 		unlock_page(page);
@@ -1148,12 +1164,12 @@
 	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
 
 	if (__is_valid_data_blkaddr(blkaddr) &&
-		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
 		err = -EFAULT;
 		goto sync_out;
 	}
 
-	if (is_valid_data_blkaddr(sbi, blkaddr)) {
+	if (__is_valid_data_blkaddr(blkaddr)) {
		/* use out-place-update for direct IO under LFS mode */
 		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
 							map->m_may_create) {
@@ -1530,7 +1546,7 @@
 	}
 
 	if (size) {
-		if (f2fs_encrypted_inode(inode))
+		if (IS_ENCRYPTED(inode))
 			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
 
 		ret = fiemap_fill_next_extent(fieinfo, logical,
@@ -1563,6 +1579,130 @@
 	return ret;
 }
 
+static int f2fs_read_single_page(struct inode *inode, struct page *page,
+					unsigned nr_pages,
+					struct f2fs_map_blocks *map,
+					struct bio **bio_ret,
+					sector_t *last_block_in_bio,
+					bool is_readahead)
+{
+	struct bio *bio = *bio_ret;
+	const unsigned blkbits = inode->i_blkbits;
+	const unsigned blocksize = 1 << blkbits;
+	sector_t block_in_file;
+	sector_t last_block;
+	sector_t last_block_in_file;
+	sector_t block_nr;
+	bool bio_encrypted;
+	u64 dun;
+	int ret = 0;
+
+	block_in_file = (sector_t)page->index;
+	last_block = block_in_file + nr_pages;
+	last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
+							blkbits;
+	if (last_block > last_block_in_file)
+		last_block = last_block_in_file;
+
+	/* just zeroing out page which is beyond EOF */
+	if (block_in_file >= last_block)
+		goto zero_out;
+	/*
+	 * Map blocks using the previous result first.
+	 */
+	if ((map->m_flags & F2FS_MAP_MAPPED) &&
+			block_in_file > map->m_lblk &&
+			block_in_file < (map->m_lblk + map->m_len))
+		goto got_it;
+
+	/*
+	 * Then do more f2fs_map_blocks() calls until we are
+	 * done with this page.
+	 */
+	map->m_lblk = block_in_file;
+	map->m_len = last_block - block_in_file;
+
+	ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
+	if (ret)
+		goto out;
+got_it:
+	if ((map->m_flags & F2FS_MAP_MAPPED)) {
+		block_nr = map->m_pblk + block_in_file - map->m_lblk;
+		SetPageMappedToDisk(page);
+
+		if (!PageUptodate(page) && !cleancache_get_page(page)) {
+			SetPageUptodate(page);
+			goto confused;
+		}
+
+		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+							DATA_GENERIC_ENHANCE_READ)) {
+			ret = -EFAULT;
+			goto out;
+		}
+	} else {
+zero_out:
+		zero_user_segment(page, 0, PAGE_SIZE);
+		if (!PageUptodate(page))
+			SetPageUptodate(page);
+		unlock_page(page);
+		goto out;
+	}
+
+	/*
+	 * This page will go to BIO.  Do we need to send this
+	 * BIO off first?
+	 */
+	if (bio && (*last_block_in_bio != block_nr - 1 ||
+		!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
+submit_and_realloc:
+		__submit_bio(F2FS_I_SB(inode), bio, DATA);
+		bio = NULL;
+	}
+
+	dun = PG_DUN(inode, page);
+	bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
+	if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted, 0)) {
+		__submit_bio(F2FS_I_SB(inode), bio, DATA);
+		bio = NULL;
+	}
+
+	if (bio == NULL) {
+		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
+				is_readahead ? REQ_RAHEAD : 0);
+		if (IS_ERR(bio)) {
+			ret = PTR_ERR(bio);
+			bio = NULL;
+			goto out;
+		}
+		if (bio_encrypted)
+			fscrypt_set_ice_dun(inode, bio, dun);
+	}
+
+	/*
+	 * If the page is under writeback, we need to wait for
+	 * its completion to see the correct decrypted data.
+	 */
+	f2fs_wait_on_block_writeback(inode, block_nr);
+
+	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+		goto submit_and_realloc;
+
+	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
+	ClearPageError(page);
+	*last_block_in_bio = block_nr;
+	goto out;
+confused:
+	if (bio) {
+		__submit_bio(F2FS_I_SB(inode), bio, DATA);
+		bio = NULL;
+	}
+	unlock_page(page);
+out:
+	*bio_ret = bio;
+	return ret;
+}
+
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
@@ -1579,15 +1719,8 @@
 	struct bio *bio = NULL;
 	sector_t last_block_in_bio = 0;
 	struct inode *inode = mapping->host;
-	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocksize = 1 << blkbits;
-	sector_t block_in_file;
-	sector_t last_block;
-	sector_t last_block_in_file;
-	sector_t block_nr;
 	struct f2fs_map_blocks map;
-	bool bio_encrypted;
-	u64 dun;
+	int ret = 0;
 
 	map.m_pblk = 0;
 	map.m_lblk = 0;
@@ -1610,117 +1743,22 @@
 				goto next_page;
 		}
 
-		block_in_file = (sector_t)page->index;
-		last_block = block_in_file + nr_pages;
-		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
-								blkbits;
-		if (last_block > last_block_in_file)
-			last_block = last_block_in_file;
-
-		/*
-		 * Map blocks using the previous result first.
-		 */
-		if ((map.m_flags & F2FS_MAP_MAPPED) &&
-				block_in_file > map.m_lblk &&
-				block_in_file < (map.m_lblk + map.m_len))
-			goto got_it;
-
-		/*
-		 * Then do more f2fs_map_blocks() calls until we are
-		 * done with this page.
-		 */
-		map.m_flags = 0;
-
-		if (block_in_file < last_block) {
-			map.m_lblk = block_in_file;
-			map.m_len = last_block - block_in_file;
-
-			if (f2fs_map_blocks(inode, &map, 0,
-						F2FS_GET_BLOCK_DEFAULT))
-				goto set_error_page;
-		}
-got_it:
-		if ((map.m_flags & F2FS_MAP_MAPPED)) {
-			block_nr = map.m_pblk + block_in_file - map.m_lblk;
-			SetPageMappedToDisk(page);
-
-			if (!PageUptodate(page) && !cleancache_get_page(page)) {
-				SetPageUptodate(page);
-				goto confused;
-			}
-
-			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
-								DATA_GENERIC))
-				goto set_error_page;
-		} else {
+		ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
+					&last_block_in_bio, is_readahead);
+		if (ret) {
+			SetPageError(page);
 			zero_user_segment(page, 0, PAGE_SIZE);
-			if (!PageUptodate(page))
-				SetPageUptodate(page);
 			unlock_page(page);
-			goto next_page;
 		}
 
-		/*
-		 * This page will go to BIO.  Do we need to send this
-		 * BIO off first?
-		 */
-		if (bio && (last_block_in_bio != block_nr - 1 ||
-			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
-submit_and_realloc:
-			__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
-			bio = NULL;
-		}
-
-		dun = PG_DUN(inode, page);
-		bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
-		if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted, 0)) {
-			__submit_bio(F2FS_I_SB(inode), bio, DATA);
-			bio = NULL;
-		}
-
-		if (bio == NULL) {
-			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
-					is_readahead ? REQ_RAHEAD : 0);
-			if (IS_ERR(bio)) {
-				bio = NULL;
-				goto set_error_page;
-			}
-			if (bio_encrypted)
-				fscrypt_set_ice_dun(inode, bio, dun);
-		}
-
-		/*
-		 * If the page is under writeback, we need to wait for
-		 * its completion to see the correct decrypted data.
-		 */
-		f2fs_wait_on_block_writeback(inode, block_nr);
-
-		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
-			goto submit_and_realloc;
-
-		inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
-		ClearPageError(page);
-		last_block_in_bio = block_nr;
-		goto next_page;
-set_error_page:
-		SetPageError(page);
-		zero_user_segment(page, 0, PAGE_SIZE);
-		unlock_page(page);
-		goto next_page;
-confused:
-		if (bio) {
-			__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
-			bio = NULL;
-		}
-		unlock_page(page);
 next_page:
 		if (pages)
 			put_page(page);
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
-		__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
-	return 0;
+		__submit_bio(F2FS_I_SB(inode), bio, DATA);
+	return pages ? 0 : ret;
 }
 
 static int f2fs_read_data_page(struct file *file, struct page *page)
@@ -1816,7 +1854,7 @@
 	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
 			fio && fio->op == REQ_OP_WRITE &&
 			!(fio->op_flags & REQ_SYNC) &&
-			!f2fs_encrypted_inode(inode))
+			!IS_ENCRYPTED(inode))
 		return true;
 
 	/* this is only set during fdatasync */
@@ -1893,7 +1931,7 @@
 		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
 
 		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
-							DATA_GENERIC))
+						DATA_GENERIC_ENHANCE))
 			return -EFAULT;
 
 		ipu_force = true;
@@ -1920,7 +1958,7 @@
 got_it:
 	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
 		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
-							DATA_GENERIC)) {
+						DATA_GENERIC_ENHANCE)) {
 		err = -EFAULT;
 		goto out_writepage;
 	}
@@ -1928,7 +1966,8 @@
 	 * If current allocation needs SSR,
	 * it is better to use in-place writes for updated data.
 	 */
-	if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
+	if (ipu_force ||
+		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
 					need_inplace_update(fio))) {
 		err = encrypt_one_page(fio);
 		if (err)
@@ -1940,10 +1979,16 @@
 		if (fio->need_lock == LOCK_REQ)
 			f2fs_unlock_op(fio->sbi);
 		err = f2fs_inplace_write_data(fio);
-		if (err && PageWriteback(page))
-			end_page_writeback(page);
+		if (err) {
+			if (f2fs_encrypted_file(inode))
+				fscrypt_pullback_bio_page(&fio->encrypted_page,
+									true);
+			if (PageWriteback(page))
+				end_page_writeback(page);
+		} else {
+			set_inode_flag(inode, FI_UPDATE_WRITE);
+		}
 		trace_f2fs_do_write_data_page(fio->page, IPU);
-		set_inode_flag(inode, FI_UPDATE_WRITE);
 		return err;
 	}
 
@@ -2105,7 +2150,8 @@
 	}
 
 	unlock_page(page);
-	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode))
+	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
+					!F2FS_I(inode)->cp_task)
 		f2fs_balance_fs(sbi, need_balance_fs);
 
 	if (unlikely(f2fs_cp_error(sbi))) {
@@ -2392,7 +2438,8 @@
 		down_write(&F2FS_I(inode)->i_mmap_sem);
 
 		truncate_pagecache(inode, i_size);
-		f2fs_truncate_blocks(inode, i_size, true, true);
+		if (!IS_NOQUOTA(inode))
+			f2fs_truncate_blocks(inode, i_size, true);
 
 		up_write(&F2FS_I(inode)->i_mmap_sem);
 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -2575,6 +2622,11 @@
 		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 	} else {
+		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+				DATA_GENERIC_ENHANCE_READ)) {
+			err = -EFAULT;
+			goto fail;
+		}
 		err = f2fs_submit_page_read(inode, page, blkaddr);
 		if (err)
 			goto fail;
@@ -2673,14 +2725,11 @@
 {
 	struct f2fs_private_dio *dio;
 	bool write = (bio_op(bio) == REQ_OP_WRITE);
-	int err;
 
 	dio = f2fs_kzalloc(F2FS_I_SB(inode),
 			sizeof(struct f2fs_private_dio), GFP_NOFS);
-	if (!dio) {
-		err = -ENOMEM;
+	if (!dio)
 		goto out;
-	}
 
 	dio->inode = inode;
 	dio->orig_end_io = bio->bi_end_io;
@@ -2826,12 +2875,10 @@
 
 	clear_cold_data(page);
 
-	/* This is atomic written page, keep Private */
 	if (IS_ATOMIC_WRITTEN_PAGE(page))
 		return f2fs_drop_inmem_page(inode, page);
 
-	set_page_private(page, 0);
-	ClearPagePrivate(page);
+	f2fs_clear_page_private(page);
 }
 
 int f2fs_release_page(struct page *page, gfp_t wait)
@@ -2845,8 +2892,7 @@
 		return 0;
 
 	clear_cold_data(page);
-	set_page_private(page, 0);
-	ClearPagePrivate(page);
+	f2fs_clear_page_private(page);
 	return 1;
 }
 
@@ -2914,12 +2960,8 @@
 			return -EAGAIN;
 	}
 
-	/*
-	 * A reference is expected if PagePrivate set when move mapping,
-	 * however F2FS breaks this for maintaining dirty page counts when
-	 * truncating pages. So here adjusting the 'extra_count' make it work.
-	 */
-	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
+	/* one extra reference was held for atomic_write page */
+	extra_count = atomic_written ? 1 : 0;
 	rc = migrate_page_move_mapping(mapping, newpage,
 				page, NULL, mode, extra_count);
 	if (rc != MIGRATEPAGE_SUCCESS) {
@@ -2940,9 +2982,10 @@
 		get_page(newpage);
 	}
 
-	if (PagePrivate(page))
-		SetPagePrivate(newpage);
-	set_page_private(newpage, page_private(page));
+	if (PagePrivate(page)) {
+		f2fs_set_page_private(newpage, page_private(page));
+		f2fs_clear_page_private(page);
+	}
 
 	migrate_page_copy(newpage, page);
 
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index f05b37e..d00ba9b 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -522,30 +522,16 @@
 	kvfree(si);
 }
 
-int __init f2fs_create_root_stats(void)
+void __init f2fs_create_root_stats(void)
 {
-	struct dentry *file;
-
 	f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
-	if (!f2fs_debugfs_root)
-		return -ENOMEM;
 
-	file = debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root,
-			NULL, &stat_fops);
-	if (!file) {
-		debugfs_remove(f2fs_debugfs_root);
-		f2fs_debugfs_root = NULL;
-		return -ENOMEM;
-	}
-
-	return 0;
+	debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root, NULL,
+			    &stat_fops);
 }
 
 void f2fs_destroy_root_stats(void)
 {
-	if (!f2fs_debugfs_root)
-		return;
-
 	debugfs_remove_recursive(f2fs_debugfs_root);
 	f2fs_debugfs_root = NULL;
 }
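
The debug.c hunk follows the tree-wide debugfs convention: return
values of debugfs_create_dir()/debugfs_create_file() are deliberately
not checked, since a debugfs failure should never prevent the module
from working, and teardown via debugfs_remove_recursive() copes with a
NULL root. Typical usage after the conversion (sketch; stat_fops as in
the hunk above):

    #include <linux/debugfs.h>

    extern const struct file_operations stat_fops;

    static struct dentry *example_root;

    static void example_debugfs_init(void)
    {
        /* Unchecked by design: failures degrade gracefully. */
        example_root = debugfs_create_dir("example", NULL);
        debugfs_create_file("status", 0444, example_root, NULL,
                            &stat_fops);
    }

    static void example_debugfs_exit(void)
    {
        debugfs_remove_recursive(example_root);
        example_root = NULL;
    }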
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 8fd0360..bd93504 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -385,7 +385,7 @@
 		if (err)
 			goto put_error;
 
-		if ((f2fs_encrypted_inode(dir) || dummy_encrypt) &&
+		if ((IS_ENCRYPTED(dir) || dummy_encrypt) &&
 					f2fs_may_encrypt(inode)) {
 			err = fscrypt_inherit_context(dir, inode, page, false);
 			if (err)
@@ -399,7 +399,7 @@
 
 	if (new_name) {
 		init_dent_inode(new_name, page);
-		if (f2fs_encrypted_inode(dir))
+		if (IS_ENCRYPTED(dir))
 			file_set_enc_name(inode);
 	}
 
@@ -728,7 +728,7 @@
 		!f2fs_truncate_hole(dir, page->index, page->index + 1)) {
 		f2fs_clear_radix_tree_dirty_tag(page);
 		clear_page_dirty_for_io(page);
-		ClearPagePrivate(page);
+		f2fs_clear_page_private(page);
 		ClearPageUptodate(page);
 		clear_cold_data(page);
 		inode_dec_dirty_pages(dir);
@@ -800,6 +800,10 @@
 		if (de->name_len == 0) {
 			bit_pos++;
 			ctx->pos = start_pos + bit_pos;
+			printk_ratelimited(
+				"%s, invalid namelen(0), ino:%u, run fsck to fix.",
+				KERN_WARNING, le32_to_cpu(de->ino));
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
 			continue;
 		}
 
@@ -810,7 +814,8 @@
 
 		/* check memory boundary before moving forward */
 		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
-		if (unlikely(bit_pos > d->max)) {
+		if (unlikely(bit_pos > d->max ||
+				le16_to_cpu(de->name_len) > F2FS_NAME_LEN)) {
 			f2fs_msg(sbi->sb, KERN_WARNING,
 				"%s: corrupted namelen=%d, run fsck to fix.",
 				__func__, le16_to_cpu(de->name_len));
@@ -819,7 +824,7 @@
 			goto out;
 		}
 
-		if (f2fs_encrypted_inode(d->inode)) {
+		if (IS_ENCRYPTED(d->inode)) {
 			int save_len = fstr->len;
 
 			err = fscrypt_fname_disk_to_usr(d->inode,
@@ -862,7 +867,7 @@
 	struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
 	int err = 0;
 
-	if (f2fs_encrypted_inode(inode)) {
+	if (IS_ENCRYPTED(inode)) {
 		err = fscrypt_get_encryption_info(inode);
 		if (err && err != -ENOKEY)
 			goto out;
@@ -891,7 +896,7 @@
 			page_cache_sync_readahead(inode->i_mapping, ra, file, n,
 				min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
 
-		dentry_page = f2fs_get_lock_data_page(inode, n, false);
+		dentry_page = f2fs_find_data_page(inode, n);
 		if (IS_ERR(dentry_page)) {
 			err = PTR_ERR(dentry_page);
 			if (err == -ENOENT) {
@@ -909,11 +914,11 @@
 		err = f2fs_fill_dentries(ctx, &d,
 				n * NR_DENTRY_IN_BLOCK, &fstr);
 		if (err) {
-			f2fs_put_page(dentry_page, 1);
+			f2fs_put_page(dentry_page, 0);
 			break;
 		}
 
-		f2fs_put_page(dentry_page, 1);
+		f2fs_put_page(dentry_page, 0);
 	}
 out_free:
 	fscrypt_fname_free_buffer(&fstr);
@@ -924,7 +929,7 @@
 
 static int f2fs_dir_open(struct inode *inode, struct file *filp)
 {
-	if (f2fs_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
 	return 0;
 }
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index a434980..7f72bd0 100755
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -26,7 +26,6 @@
 #include <crypto/hash.h>
 #include <linux/overflow.h>
 
-#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
 #include <linux/fscrypt.h>
 
 #ifdef CONFIG_F2FS_CHECK_FS
@@ -193,6 +192,8 @@
 #define DEF_CP_INTERVAL			60	/* 60 secs */
 #define DEF_IDLE_INTERVAL		5	/* 5 secs */
 #define DEF_DISABLE_INTERVAL		5	/* 5 secs */
+#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 secs */
+#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */
 
 struct cp_control {
 	int reason;
@@ -211,7 +212,14 @@
 	META_SSA,
 	META_MAX,
 	META_POR,
-	DATA_GENERIC,
+	DATA_GENERIC,		/* check range only */
+	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
+	DATA_GENERIC_ENHANCE_READ,	/*
+					 * strong check on range and segment
+					 * bitmap but no warning due to race
+					 * condition of read on truncated area
+					 * by extent_cache
+					 */
 	META_GENERIC,
 };
 
@@ -256,7 +264,7 @@
 /* max discard pend list number */
 #define MAX_PLIST_NUM		512
 #define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
-					(MAX_PLIST_NUM - 1) : (blk_num - 1))
+					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
 
 enum {
 	D_PREP,			/* initial */
@@ -312,6 +320,7 @@
 	bool sync;			/* submit discard with REQ_SYNC flag */
 	bool ordered;			/* issue discard by lba order */
 	unsigned int granularity;	/* discard granularity */
+	int timeout;			/* discard timeout for put_super */
 };
 
 struct discard_cmd_control {
@@ -455,7 +464,6 @@
 
 /* for inline stuff */
 #define DEF_INLINE_RESERVED_SIZE	1
-#define DEF_MIN_INLINE_SIZE		1
 static inline int get_extra_isize(struct inode *inode);
 static inline int get_inline_xattr_addrs(struct inode *inode);
 #define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
@@ -1039,7 +1047,7 @@
 	bool submitted;		/* indicate IO submission */
 	int need_lock;		/* indicate we need to lock cp_rwsem */
 	bool in_list;		/* indicate fio is in io_list */
-	bool is_meta;		/* indicate borrow meta inode mapping or not */
+	bool is_por;		/* indicate IO is from recovery or not */
 	bool retry;		/* need to reallocate block address */
 	enum iostat_type io_type;	/* io type */
 	struct writeback_control *io_wbc; /* writeback control */
@@ -1066,8 +1074,8 @@
 	block_t start_blk;
 	block_t end_blk;
 #ifdef CONFIG_BLK_DEV_ZONED
-	unsigned int nr_blkz;			/* Total number of zones */
-	u8 *blkz_type;				/* Array of zones type */
+	unsigned int nr_blkz;		/* Total number of zones */
+	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
 #endif
 };
 
@@ -1098,6 +1106,7 @@
 	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
 	SBI_IS_RECOVERED,			/* recovered orphan/data */
 	SBI_CP_DISABLED,			/* CP was disabled last mount */
+	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
 	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
 	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
 	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
@@ -1109,6 +1118,7 @@
 	DISCARD_TIME,
 	GC_TIME,
 	DISABLE_TIME,
+	UMOUNT_DISCARD_TIMEOUT,
 	MAX_TIME,
 };
 
@@ -1136,7 +1146,7 @@
 	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
 };
 
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 #define DUMMY_ENCRYPTION_ENABLED(sbi) \
 			(unlikely(F2FS_OPTION(sbi).test_dummy_encryption))
 #else
@@ -1237,8 +1247,6 @@
 
 	unsigned int nquota_files;		/* # of quota sysfile */
 
-	u32 s_next_generation;			/* for NFS support */
-
 	/* # of pages, see count_type */
 	atomic_t nr_pages[NR_COUNT_TYPE];
 	/* # of allocated blocks */
@@ -1364,6 +1372,17 @@
 }
 #endif
 
+/*
+ * Test if the mounted volume is a multi-device volume.
+ *   - For a single regular disk volume, sbi->s_ndevs is 0.
+ *   - For a single zoned disk volume, sbi->s_ndevs is 1.
+ *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
+ */
+static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
+{
+	return sbi->s_ndevs > 1;
+}
+
 /* For write statistics. Suppose sector size is 512 bytes,
  * and the return value is in kbytes. s is of struct f2fs_sb_info.
  */
@@ -1776,6 +1795,7 @@
 	return -ENOSPC;
 }
 
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
 						struct inode *inode,
 						block_t count)
@@ -1784,13 +1804,21 @@
 
 	spin_lock(&sbi->stat_lock);
 	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
-	f2fs_bug_on(sbi, inode->i_blocks < sectors);
 	sbi->total_valid_block_count -= (block_t)count;
 	if (sbi->reserved_blocks &&
 		sbi->current_reserved_blocks < sbi->reserved_blocks)
 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
 					sbi->current_reserved_blocks + count);
 	spin_unlock(&sbi->stat_lock);
+	if (unlikely(inode->i_blocks < sectors)) {
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
+			inode->i_ino,
+			(unsigned long long)inode->i_blocks,
+			(unsigned long long)sectors);
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		return;
+	}
 	f2fs_i_blocks_write(inode, count, false, true);
 }
 
@@ -1798,13 +1826,12 @@
 {
 	atomic_inc(&sbi->nr_pages[count_type]);
 
-	if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
-		count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA ||
-		count_type == F2FS_RD_DATA || count_type == F2FS_RD_NODE ||
-		count_type == F2FS_RD_META)
-		return;
-
-	set_sbi_flag(sbi, SBI_IS_DIRTY);
+	if (count_type == F2FS_DIRTY_DENTS ||
+			count_type == F2FS_DIRTY_NODES ||
+			count_type == F2FS_DIRTY_META ||
+			count_type == F2FS_DIRTY_QDATA ||
+			count_type == F2FS_DIRTY_IMETA)
+		set_sbi_flag(sbi, SBI_IS_DIRTY);
 }
 
 static inline void inode_inc_dirty_pages(struct inode *inode)
@@ -1889,7 +1916,11 @@
 	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
 		offset = (flag == SIT_BITMAP) ?
 			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
-		return &ckpt->sit_nat_version_bitmap + offset;
+		/*
+		 * If the large_nat_bitmap feature is enabled, skip over the
+		 * checksum word kept at the front so that all nat/sit bitmaps
+		 * stay under checksum protection.
+		 */
+		return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32);
 	}
 
 	if (__cp_payload(sbi) > 0) {
@@ -2008,7 +2039,6 @@
 
 	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
 	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
-	f2fs_bug_on(sbi, !is_inode && !inode->i_blocks);
 
 	sbi->total_valid_node_count--;
 	sbi->total_valid_block_count--;
@@ -2018,10 +2048,19 @@
 
 	spin_unlock(&sbi->stat_lock);
 
-	if (is_inode)
+	if (is_inode) {
 		dquot_free_inode(inode);
-	else
+	} else {
+		if (unlikely(inode->i_blocks == 0)) {
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"Inconsistent i_blocks, ino:%lu, iblocks:%llu",
+				inode->i_ino,
+				(unsigned long long)inode->i_blocks);
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			return;
+		}
 		f2fs_i_blocks_write(inode, 1, false, true);
+	}
 }
 
 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
@@ -2156,10 +2195,17 @@
 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
 		get_pages(sbi, F2FS_WB_CP_DATA) ||
 		get_pages(sbi, F2FS_DIO_READ) ||
-		get_pages(sbi, F2FS_DIO_WRITE) ||
-		atomic_read(&SM_I(sbi)->dcc_info->queued_discard) ||
-		atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
+		get_pages(sbi, F2FS_DIO_WRITE))
 		return false;
+
+	if (SM_I(sbi) && SM_I(sbi)->dcc_info &&
+			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
+		return false;
+
+	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
+			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
+		return false;
+
 	return f2fs_time_over(sbi, type);
 }
 
@@ -2300,11 +2346,12 @@
 #define F2FS_EXTENTS_FL			0x00080000 /* Inode uses extents */
 #define F2FS_EA_INODE_FL	        0x00200000 /* Inode used for large EA */
 #define F2FS_EOFBLOCKS_FL		0x00400000 /* Blocks allocated beyond EOF */
+#define F2FS_NOCOW_FL			0x00800000 /* Do not cow file */
 #define F2FS_INLINE_DATA_FL		0x10000000 /* Inode has inline data. */
 #define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
 #define F2FS_RESERVED_FL		0x80000000 /* reserved for ext4 lib */
 
-#define F2FS_FL_USER_VISIBLE		0x304BDFFF /* User visible flags */
+#define F2FS_FL_USER_VISIBLE		0x30CBDFFF /* User visible flags */
 #define F2FS_FL_USER_MODIFIABLE		0x204BC0FF /* User modifiable flags */
 
 /* Flags we can manipulate with through F2FS_IOC_FSSETXATTR */
@@ -2535,9 +2582,18 @@
 	return is_inode_flag_set(inode, FI_INLINE_XATTR);
 }
 
+#define ALIGN_DOWN(x, a)        __ALIGN_KERNEL((x) - ((a) - 1), (a))
+
 static inline unsigned int addrs_per_inode(struct inode *inode)
 {
-	return CUR_ADDRS_PER_INODE(inode) - get_inline_xattr_addrs(inode);
+	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
+				get_inline_xattr_addrs(inode);
+	return ALIGN_DOWN(addrs, 1);
+}
+
+static inline unsigned int addrs_per_block(struct inode *inode)
+{
+	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, 1);
 }
 
 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
@@ -2550,7 +2606,9 @@
 
 static inline int inline_xattr_size(struct inode *inode)
 {
-	return get_inline_xattr_addrs(inode) * sizeof(__le32);
+	if (f2fs_has_inline_xattr(inode))
+		return get_inline_xattr_addrs(inode) * sizeof(__le32);
+	return 0;
 }
 
 static inline int f2fs_has_inline_data(struct inode *inode)
@@ -2791,9 +2849,9 @@
 
 #define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
-		((offsetof(typeof(*f2fs_inode), field) +	\
+		((offsetof(typeof(*(f2fs_inode)), field) +	\
 		sizeof((f2fs_inode)->field))			\
-		<= (F2FS_OLD_ATTRIBUTE_SIZE + extra_isize))	\
+		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))	\
 
 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
 {
@@ -2822,12 +2880,10 @@
 
 #define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)
 
-#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META &&	\
-				(!is_read_io(fio->op) || fio->is_meta))
+#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
 
 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
 					block_t blkaddr, int type);
-void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
 					block_t blkaddr, int type)
 {
@@ -2846,13 +2902,25 @@
 	return true;
 }
 
-static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
-						block_t blkaddr)
+static inline void f2fs_set_page_private(struct page *page,
+						unsigned long data)
 {
-	if (!__is_valid_data_blkaddr(blkaddr))
-		return false;
-	verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
-	return true;
+	if (PagePrivate(page))
+		return;
+
+	get_page(page);
+	SetPagePrivate(page);
+	set_page_private(page, data);
+}
+
+static inline void f2fs_clear_page_private(struct page *page)
+{
+	if (!PagePrivate(page))
+		return;
+
+	set_page_private(page, 0);
+	ClearPagePrivate(page);
+	f2fs_put_page(page, 0);
 }
 
 /*
@@ -2860,8 +2928,7 @@
  */
 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
 void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
-							bool buf_write);
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
 int f2fs_truncate(struct inode *inode);
 int f2fs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 			struct kstat *stat);
@@ -3034,7 +3101,7 @@
 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
-bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
+bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
 					struct cp_control *cpc);
 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
@@ -3356,7 +3423,7 @@
 
 int f2fs_build_stats(struct f2fs_sb_info *sbi);
 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
-int __init f2fs_create_root_stats(void);
+void __init f2fs_create_root_stats(void);
 void f2fs_destroy_root_stats(void);
 #else
 #define stat_inc_cp_count(si)				do { } while (0)
@@ -3394,7 +3461,7 @@
 
 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
-static inline int __init f2fs_create_root_stats(void) { return 0; }
+static inline void __init f2fs_create_root_stats(void) { }
 static inline void f2fs_destroy_root_stats(void) { }
 #endif
 
@@ -3490,19 +3557,14 @@
 /*
  * crypto support
  */
-static inline bool f2fs_encrypted_inode(struct inode *inode)
-{
-	return file_is_encrypt(inode);
-}
-
 static inline bool f2fs_encrypted_file(struct inode *inode)
 {
-	return f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode);
+	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
 }
 
 static inline void f2fs_set_encrypted_inode(struct inode *inode)
 {
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	file_set_encrypt(inode);
 	f2fs_set_inode_flags(inode);
 #endif
@@ -3535,16 +3597,12 @@
 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
 
 #ifdef CONFIG_BLK_DEV_ZONED
-static inline int get_blkz_type(struct f2fs_sb_info *sbi,
-			struct block_device *bdev, block_t blkaddr)
+static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
+				    block_t blkaddr)
 {
 	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
-	int i;
 
-	for (i = 0; i < sbi->s_ndevs; i++)
-		if (FDEV(i).bdev == bdev)
-			return FDEV(i).blkz_type[zno];
-	return -EINVAL;
+	return test_bit(zno, FDEV(devi).blkz_seq);
 }
 #endif
 
@@ -3553,9 +3611,27 @@
 	return f2fs_sb_has_blkzoned(sbi);
 }
 
+static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
+{
+	return blk_queue_discard(bdev_get_queue(bdev)) ||
+#ifdef CONFIG_BLK_DEV_ZONED
+	       bdev_is_zoned(bdev);
+#else
+	       0;
+#endif
+}
+
 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
 {
-	return blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev));
+	int i;
+
+	if (!f2fs_is_multi_device(sbi))
+		return f2fs_bdev_support_discard(sbi->sb->s_bdev);
+
+	for (i = 0; i < sbi->s_ndevs; i++)
+		if (f2fs_bdev_support_discard(FDEV(i).bdev))
+			return true;
+	return false;
 }
 
 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
@@ -3564,6 +3640,20 @@
 					f2fs_hw_should_discard(sbi);
 }
 
+static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
+{
+	int i;
+
+	if (!f2fs_is_multi_device(sbi))
+		return bdev_read_only(sbi->sb->s_bdev);
+
+	for (i = 0; i < sbi->s_ndevs; i++)
+		if (bdev_read_only(FDEV(i).bdev))
+			return true;
+	return false;
+}
+
 static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
 {
 	clear_opt(sbi, ADAPTIVE);
@@ -3581,7 +3671,7 @@
 
 static inline bool f2fs_may_encrypt(struct inode *inode)
 {
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	umode_t mode = inode->i_mode;
 
 	return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
@@ -3620,7 +3710,7 @@
 	if (f2fs_post_read_required(inode) &&
 		!fscrypt_using_hardware_encryption(inode))
 		return true;
-	if (sbi->s_ndevs)
+	if (f2fs_is_multi_device(sbi))
 		return true;
 	/*
 	 * for blkzoned device, fallback direct IO to buffered IO, so
@@ -3654,8 +3744,6 @@
 #define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
 #endif
 
-#endif
-
 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
 {
 #ifdef CONFIG_QUOTA
@@ -3668,3 +3756,5 @@
 #endif
 	return false;
 }
+
+#endif /* _LINUX_F2FS_H */
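
Note: the new f2fs_is_multi_device() helper matters because a bare sbi->s_ndevs test mis-classifies a single zoned disk. Following the comment in the hunk above:

	/* single regular disk: s_ndevs == 0  ->  f2fs_is_multi_device() == false
	 * single zoned disk:   s_ndevs == 1  ->  false
	 * real multi-device:   s_ndevs >= 2  ->  true
	 *
	 * The old "if (!sbi->s_ndevs)" checks treated s_ndevs == 1 as
	 * multi-device, which is why the call sites in file.c, gc.c, and
	 * segment.c below are converted to the helper.
	 */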
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index d350497..24fe54e 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -40,6 +40,8 @@
 	err = filemap_fault(vma, vmf);
 	up_read(&F2FS_I(inode)->i_mmap_sem);
 
+	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)err);
+
 	return err;
 }
 
@@ -358,7 +360,7 @@
 	switch (whence) {
 	case SEEK_DATA:
 		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
-			is_valid_data_blkaddr(sbi, blkaddr))
+			__is_valid_data_blkaddr(blkaddr))
 			return true;
 		break;
 	case SEEK_HOLE:
@@ -424,7 +426,7 @@
 
 			if (__is_valid_data_blkaddr(blkaddr) &&
 				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
-						blkaddr, DATA_GENERIC)) {
+					blkaddr, DATA_GENERIC_ENHANCE)) {
 				f2fs_put_dnode(&dn);
 				goto fail;
 			}
@@ -525,7 +527,8 @@
 		f2fs_set_data_blkaddr(dn);
 
 		if (__is_valid_data_blkaddr(blkaddr) &&
-			!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+			!f2fs_is_valid_blkaddr(sbi, blkaddr,
+					DATA_GENERIC_ENHANCE))
 			continue;
 
 		f2fs_invalidate_blocks(sbi, blkaddr);
@@ -554,7 +557,7 @@
 
 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
 {
-	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
+	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
 }
 
 static int truncate_partial_data_page(struct inode *inode, u64 from,
@@ -584,15 +587,14 @@
 	zero_user(page, offset, PAGE_SIZE - offset);
 
 	/* An encrypted inode should have a key and truncate the last page. */
-	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
+	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
 	if (!cache_only)
 		set_page_dirty(page);
 	f2fs_put_page(page, 1);
 	return 0;
 }
 
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
-							bool buf_write)
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct dnode_of_data dn;
@@ -600,7 +602,6 @@
 	int count = 0, err = 0;
 	struct page *ipage;
 	bool truncate_page = false;
-	int flag = buf_write ? F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO;
 
 	trace_f2fs_truncate_blocks_enter(inode, from);
 
@@ -610,7 +611,7 @@
 		goto free_partial;
 
 	if (lock)
-		__do_map_lock(sbi, flag, true);
+		f2fs_lock_op(sbi);
 
 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
 	if (IS_ERR(ipage)) {
@@ -648,7 +649,7 @@
 	err = f2fs_truncate_inode_blocks(inode, free_from);
 out:
 	if (lock)
-		__do_map_lock(sbi, flag, false);
+		f2fs_unlock_op(sbi);
 free_partial:
 	/* lastly zero out the first data page */
 	if (!err)
@@ -683,7 +684,7 @@
 			return err;
 	}
 
-	err = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
+	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
 	if (err)
 		return err;
 
@@ -714,7 +715,7 @@
 		stat->attributes |= STATX_ATTR_APPEND;
 	if (flags & F2FS_COMPR_FL)
 		stat->attributes |= STATX_ATTR_COMPRESSED;
-	if (f2fs_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		stat->attributes |= STATX_ATTR_ENCRYPTED;
 	if (flags & F2FS_IMMUTABLE_FL)
 		stat->attributes |= STATX_ATTR_IMMUTABLE;
@@ -771,7 +772,6 @@
 {
 	struct inode *inode = d_inode(dentry);
 	int err;
-	bool size_changed = false;
 
 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
 		return -EIO;
@@ -846,8 +846,6 @@
 		down_write(&F2FS_I(inode)->i_sem);
 		F2FS_I(inode)->last_disk_size = i_size_read(inode);
 		up_write(&F2FS_I(inode)->i_sem);
-
-		size_changed = true;
 	}
 
 	__setattr_copy(inode, attr);
@@ -861,7 +859,7 @@
 	}
 
 	/* file size may changed here */
-	f2fs_mark_inode_dirty_sync(inode, size_changed);
+	f2fs_mark_inode_dirty_sync(inode, true);
 
 	/* inode change will produce dirty node pages flushed by checkpoint */
 	f2fs_balance_fs(F2FS_I_SB(inode), true);
@@ -1014,7 +1012,8 @@
 	} else if (ret == -ENOENT) {
 		if (dn.max_level == 0)
 			return -ENOENT;
-		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
+		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node,
+									len);
 		blkaddr += done;
 		do_replace += done;
 		goto next;
@@ -1025,6 +1024,14 @@
 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
 		*blkaddr = datablock_addr(dn.inode,
 					dn.node_page, dn.ofs_in_node);
+
+		if (__is_valid_data_blkaddr(*blkaddr) &&
+			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
+					DATA_GENERIC_ENHANCE)) {
+			f2fs_put_dnode(&dn);
+			return -EFAULT;
+		}
+
 		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
 
 			if (test_opt(sbi, LFS)) {
@@ -1165,7 +1172,7 @@
 	int ret;
 
 	while (len) {
-		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
+		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
 
 		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
 					array_size(olen, sizeof(block_t)),
@@ -1265,7 +1272,7 @@
 	new_size = i_size_read(inode) - len;
 	truncate_pagecache(inode, new_size);
 
-	ret = f2fs_truncate_blocks(inode, new_size, true, false);
+	ret = f2fs_truncate_blocks(inode, new_size, true);
 	up_write(&F2FS_I(inode)->i_mmap_sem);
 	if (!ret)
 		f2fs_i_size_write(inode, new_size);
@@ -1450,7 +1457,7 @@
 	f2fs_balance_fs(sbi, true);
 
 	down_write(&F2FS_I(inode)->i_mmap_sem);
-	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
+	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
 	up_write(&F2FS_I(inode)->i_mmap_sem);
 	if (ret)
 		return ret;
@@ -1566,7 +1573,7 @@
 	if (!S_ISREG(inode->i_mode))
 		return -EINVAL;
 
-	if (f2fs_encrypted_inode(inode) &&
+	if (IS_ENCRYPTED(inode) &&
 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
 		return -EOPNOTSUPP;
 
@@ -1650,10 +1657,12 @@
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	unsigned int flags = fi->i_flags;
 
-	if (f2fs_encrypted_inode(inode))
+	if (IS_ENCRYPTED(inode))
 		flags |= F2FS_ENCRYPT_FL;
 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
 		flags |= F2FS_INLINE_DATA_FL;
+	if (is_inode_flag_set(inode, FI_PIN_FILE))
+		flags |= F2FS_NOCOW_FL;
 
 	flags &= F2FS_FL_USER_VISIBLE;
 
@@ -1966,11 +1975,11 @@
 		break;
 	case F2FS_GOING_DOWN_NEED_FSCK:
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
+		set_sbi_flag(sbi, SBI_IS_DIRTY);
 		/* do checkpoint only */
 		ret = f2fs_sync_fs(sb, 1);
-		if (ret)
-			goto out;
-		break;
+		goto out;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -1986,6 +1995,9 @@
 out:
 	if (in != F2FS_GOING_DOWN_FULLSYNC)
 		mnt_drop_write_file(filp);
+
+	trace_f2fs_shutdown(sbi, in, ret);
+
 	return ret;
 }
 
@@ -2412,7 +2424,7 @@
 	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
 		return -EINVAL;
 
-	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
+	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
 		return -EOPNOTSUPP;
 
 	if (src == dst) {
@@ -2569,10 +2581,10 @@
 							sizeof(range)))
 		return -EFAULT;
 
-	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
+	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
 			__is_large_section(sbi)) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
-			"Can't flush %u in %d for segs_per_sec %u != 1\n",
+			"Can't flush %u in %d for segs_per_sec %u != 1",
 				range.dev_num, sbi->s_ndevs,
 				sbi->segs_per_sec);
 		return -EINVAL;
@@ -2634,7 +2646,7 @@
 
 	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
-			"%s: Enable GC = ino %lx after %x GC trials\n",
+			"%s: Enable GC = ino %lx after %x GC trials",
 			__func__, inode->i_ino,
 			fi->i_gc_failures[GC_FAILURE_PIN]);
 		clear_inode_flag(inode, FI_PIN_FILE);
@@ -2649,8 +2661,8 @@
 	__u32 pin;
 	int ret = 0;
 
-	if (!inode_owner_or_capable(inode))
-		return -EACCES;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
 
 	if (get_user(pin, (__u32 __user *)arg))
 		return -EFAULT;
@@ -2807,15 +2819,21 @@
 	struct inode *inode = file_inode(file);
 	ssize_t ret;
 
-	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
-		return -EIO;
+	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+		ret = -EIO;
+		goto out;
+	}
 
-	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
-		return -EINVAL;
+	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	if (!inode_trylock(inode)) {
-		if (iocb->ki_flags & IOCB_NOWAIT)
-			return -EAGAIN;
+		if (iocb->ki_flags & IOCB_NOWAIT) {
+			ret = -EAGAIN;
+			goto out;
+		}
 		inode_lock(inode);
 	}
 
@@ -2828,19 +2846,16 @@
 		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
 			set_inode_flag(inode, FI_NO_PREALLOC);
 
-		if ((iocb->ki_flags & IOCB_NOWAIT) &&
-			(iocb->ki_flags & IOCB_DIRECT)) {
-				if (!f2fs_overwrite_io(inode, iocb->ki_pos,
+		if ((iocb->ki_flags & IOCB_NOWAIT)) {
+			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
 						iov_iter_count(from)) ||
-					f2fs_has_inline_data(inode) ||
-					f2fs_force_buffered_io(inode,
-							iocb, from)) {
-						clear_inode_flag(inode,
-								FI_NO_PREALLOC);
-						inode_unlock(inode);
-						return -EAGAIN;
-				}
-
+				f2fs_has_inline_data(inode) ||
+				f2fs_force_buffered_io(inode, iocb, from)) {
+				clear_inode_flag(inode, FI_NO_PREALLOC);
+				inode_unlock(inode);
+				ret = -EAGAIN;
+				goto out;
+			}
 		} else {
 			preallocated = true;
 			target_size = iocb->ki_pos + iov_iter_count(from);
@@ -2849,7 +2864,8 @@
 			if (err) {
 				clear_inode_flag(inode, FI_NO_PREALLOC);
 				inode_unlock(inode);
-				return err;
+				ret = err;
+				goto out;
 			}
 		}
 		ret = __generic_file_write_iter(iocb, from);
@@ -2863,7 +2879,9 @@
 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
 	}
 	inode_unlock(inode);
-
+out:
+	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
+					iov_iter_count(from), ret);
 	if (ret > 0)
 		ret = generic_write_sync(iocb, ret);
 	return ret;
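
Note: the f2fs_file_write_iter() rework above converts every early return into "goto out" so the new trace_f2fs_file_write_iter() tracepoint observes all exit paths, errors included. A minimal sketch of the pattern (hypothetical names, illustration only):

	static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
	{
		ssize_t ret;

		if (unlikely(example_cp_error())) {	/* hypothetical check */
			ret = -EIO;
			goto out;			/* still traced below */
		}
		ret = example_do_write(iocb, from);	/* hypothetical worker */
	out:
		trace_example_write(iocb, ret);		/* fires on every path */
		return ret;
	}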
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index d421720..24b74d6 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -591,7 +591,7 @@
 		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
 		bidx = node_ofs - 5 - dec;
 	}
-	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
+	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
 }
 
 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -656,6 +656,11 @@
 
 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
 		dn.data_blkaddr = ei.blk + index - ei.fofs;
+		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+						DATA_GENERIC_ENHANCE_READ))) {
+			err = -EFAULT;
+			goto put_page;
+		}
 		goto got_it;
 	}
 
@@ -665,8 +670,12 @@
 		goto put_page;
 	f2fs_put_dnode(&dn);
 
+	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
+		err = -ENOENT;
+		goto put_page;
+	}
 	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
-						DATA_GENERIC))) {
+						DATA_GENERIC_ENHANCE))) {
 		err = -EFAULT;
 		goto put_page;
 	}
@@ -1175,6 +1184,7 @@
 				"type [%d, %d] in SSA and SIT",
 				segno, type, GET_SUM_TYPE((&sum->footer)));
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			f2fs_stop_checkpoint(sbi, false);
 			goto skip;
 		}
 
@@ -1346,7 +1356,7 @@
 	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
 
 	/* give warm/cold data area from slower device */
-	if (sbi->s_ndevs && !__is_large_section(sbi))
+	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
 		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
 				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
 }
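
Note: ADDRS_PER_BLOCK becoming a per-inode value reflects that reserved inline-xattr space changes how many data pointers a node can carry. As a worked example under the usual 4KB layout (an inode without inline xattrs holds 923 data pointers and a direct node block holds 1018 -- values assumed from the on-disk format, not shown in this hunk): the first direct node block has bidx = 0 and therefore maps file blocks starting at 0 * 1018 + 923 = 923.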
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index b8676a2..d67622c 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -316,7 +316,7 @@
 		clear_inode_flag(inode, FI_INLINE_DATA);
 		f2fs_put_page(ipage, 1);
 	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
-		if (f2fs_truncate_blocks(inode, 0, false, false))
+		if (f2fs_truncate_blocks(inode, 0, false))
 			return false;
 		goto process_inline;
 	}
@@ -438,6 +438,14 @@
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(dir, FI_INLINE_DENTRY);
 
+	/*
+	 * Reclaim the space that was reserved to keep the inline_dentry
+	 * structure for backward compatibility.
+	 */
+	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
+			!f2fs_has_inline_xattr(dir))
+		F2FS_I(dir)->i_inline_xattr_size = 0;
+
 	f2fs_i_depth_write(dir, 1);
 	if (i_size_read(dir) < PAGE_SIZE)
 		f2fs_i_size_write(dir, PAGE_SIZE);
@@ -488,7 +496,7 @@
 	return 0;
 punch_dentry_pages:
 	truncate_inode_pages(&dir->i_data, 0);
-	f2fs_truncate_blocks(dir, 0, false, false);
+	f2fs_truncate_blocks(dir, 0, false);
 	f2fs_remove_dirty_inode(dir);
 	return err;
 }
@@ -519,6 +527,15 @@
 
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(dir, FI_INLINE_DENTRY);
+
+	/*
+	 * Reclaim the space that was reserved to keep the inline_dentry
+	 * structure for backward compatibility.
+	 */
+	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
+			!f2fs_has_inline_xattr(dir))
+		F2FS_I(dir)->i_inline_xattr_size = 0;
+
 	kvfree(backup_dentry);
 	return 0;
 recover:
@@ -677,6 +694,12 @@
 	if (IS_ERR(ipage))
 		return PTR_ERR(ipage);
 
+	/*
+	 * f2fs_readdir() is protected by inode.i_rwsem, so it is safe to
+	 * access ipage without holding the page lock.
+	 */
+	unlock_page(ipage);
+
 	inline_dentry = inline_data_addr(inode, ipage);
 
 	make_dentry_ptr_inline(inode, &d, inline_dentry);
@@ -685,7 +708,7 @@
 	if (!err)
 		ctx->pos = d.max;
 
-	f2fs_put_page(ipage, 1);
+	f2fs_put_page(ipage, 0);
 	return err < 0 ? err : 0;
 }
 
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index bec5296..ccb0222 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -14,6 +14,7 @@
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
+#include "xattr.h"
 
 #include <trace/events/f2fs.h>
 
@@ -43,7 +44,7 @@
 		new_fl |= S_NOATIME;
 	if (flags & F2FS_DIRSYNC_FL)
 		new_fl |= S_DIRSYNC;
-	if (f2fs_encrypted_inode(inode))
+	if (file_is_encrypt(inode))
 		new_fl |= S_ENCRYPTED;
 	inode_set_flags(inode, new_fl,
 			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
@@ -72,7 +73,7 @@
 
 	if (!__is_valid_data_blkaddr(addr))
 		return 1;
-	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
+	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
 		return -EFAULT;
 	return 0;
 }
@@ -176,8 +177,8 @@
 
 	if (provided != calculated)
 		f2fs_msg(sbi->sb, KERN_WARNING,
-			"checksum invalid, ino = %x, %x vs. %x",
-			ino_of_node(page), provided, calculated);
+			"checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
+			page->index, ino_of_node(page), provided, calculated);
 
 	return provided == calculated;
 }
@@ -248,13 +249,28 @@
 		return false;
 	}
 
+	if (f2fs_has_extra_attr(inode) &&
+		f2fs_sb_has_flexible_inline_xattr(sbi) &&
+		f2fs_has_inline_xattr(inode) &&
+		(!fi->i_inline_xattr_size ||
+		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"%s: inode (ino=%lx) has corrupted "
+			"i_inline_xattr_size: %d, max: %zu",
+			__func__, inode->i_ino, fi->i_inline_xattr_size,
+			MAX_INLINE_XATTR_SIZE);
+		return false;
+	}
+
 	if (F2FS_I(inode)->extent_tree) {
 		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
 
 		if (ei->len &&
-			(!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
+			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
+						DATA_GENERIC_ENHANCE) ||
 			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
-							DATA_GENERIC))) {
+						DATA_GENERIC_ENHANCE))) {
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
 			f2fs_msg(sbi->sb, KERN_WARNING,
 				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
@@ -453,7 +469,7 @@
 		inode->i_mapping->a_ops = &f2fs_dblock_aops;
 		inode_nohighmem(inode);
 	} else if (S_ISLNK(inode->i_mode)) {
-		if (f2fs_encrypted_inode(inode))
+		if (file_is_encrypt(inode))
 			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
 		else
 			inode->i_op = &f2fs_symlink_inode_operations;
@@ -473,6 +489,7 @@
 	return inode;
 
 bad_inode:
+	f2fs_inode_synced(inode);
 	iget_failed(inode);
 	trace_f2fs_iget_exit(inode, ret);
 	return ERR_PTR(ret);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 98fe5a7..b70ab57 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -10,6 +10,7 @@
 #include <linux/pagemap.h>
 #include <linux/sched.h>
 #include <linux/ctype.h>
+#include <linux/random.h>
 #include <linux/dcache.h>
 #include <linux/namei.h>
 #include <linux/quotaops.h>
@@ -50,7 +51,7 @@
 	inode->i_blocks = 0;
 	inode->i_mtime = inode->i_atime = inode->i_ctime =
 			F2FS_I(inode)->i_crtime = current_time(inode);
-	inode->i_generation = sbi->s_next_generation++;
+	inode->i_generation = prandom_u32();
 
 	if (S_ISDIR(inode->i_mode))
 		F2FS_I(inode)->i_current_depth = 1;
@@ -75,7 +76,7 @@
 	set_inode_flag(inode, FI_NEW_INODE);
 
 	/* If the directory encrypted, then we should encrypt the inode. */
-	if ((f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
+	if ((IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
 				f2fs_may_encrypt(inode))
 		f2fs_set_encrypted_inode(inode);
 
@@ -142,7 +143,7 @@
 	return ERR_PTR(err);
 }
 
-static int is_extension_exist(const unsigned char *s, const char *sub)
+static inline int is_extension_exist(const unsigned char *s, const char *sub)
 {
 	size_t slen = strlen(s);
 	size_t sublen = strlen(sub);
@@ -476,7 +477,7 @@
 		if (err)
 			goto out_iput;
 	}
-	if (f2fs_encrypted_inode(dir) &&
+	if (IS_ENCRYPTED(dir) &&
 	    (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
 	    !fscrypt_has_permitted_context(dir, inode)) {
 		f2fs_msg(inode->i_sb, KERN_WARNING,
@@ -803,7 +804,7 @@
 	if (unlikely(f2fs_cp_error(sbi)))
 		return -EIO;
 
-	if (f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
+	if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
 		int err = fscrypt_get_encryption_info(dir);
 		if (err)
 			return err;
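
Note: dropping s_next_generation (also removed from f2fs.h and super.c in this patch) in favor of prandom_u32() works because i_generation only has to differ between successive users of the same inode number -- e.g. so stale NFS file handles can be detected -- and a random 32-bit value achieves that without superblock state or locking.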
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 46fdaeb..6e8f75e1 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -454,7 +454,7 @@
 			new_blkaddr == NULL_ADDR);
 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
 			new_blkaddr == NEW_ADDR);
-	f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
+	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
 			new_blkaddr == NEW_ADDR);
 
 	/* increment version no as node is removed */
@@ -465,7 +465,7 @@
 
 	/* change address */
 	nat_set_blkaddr(e, new_blkaddr);
-	if (!is_valid_data_blkaddr(sbi, new_blkaddr))
+	if (!__is_valid_data_blkaddr(new_blkaddr))
 		set_nat_flag(e, IS_CHECKPOINTED, false);
 	__set_nat_cache_dirty(nm_i, e);
 
@@ -526,6 +526,7 @@
 	struct f2fs_nat_entry ne;
 	struct nat_entry *e;
 	pgoff_t index;
+	block_t blkaddr;
 	int i;
 
 	ni->nid = nid;
@@ -569,6 +570,11 @@
 	node_info_from_raw_nat(ni, &ne);
 	f2fs_put_page(page, 1);
 cache:
+	blkaddr = le32_to_cpu(ne.block_addr);
+	if (__is_valid_data_blkaddr(blkaddr) &&
+		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
+		return -EFAULT;
+
 	/* cache nat entry */
 	cache_nat_entry(sbi, nid, &ne);
 	return 0;
@@ -600,9 +606,9 @@
 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
 {
 	const long direct_index = ADDRS_PER_INODE(dn->inode);
-	const long direct_blks = ADDRS_PER_BLOCK;
-	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
-	unsigned int skipped_unit = ADDRS_PER_BLOCK;
+	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
+	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
+	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
 	int cur_level = dn->cur_level;
 	int max_level = dn->max_level;
 	pgoff_t base = 0;
@@ -636,9 +642,9 @@
 				int offset[4], unsigned int noffset[4])
 {
 	const long direct_index = ADDRS_PER_INODE(inode);
-	const long direct_blks = ADDRS_PER_BLOCK;
+	const long direct_blks = ADDRS_PER_BLOCK(inode);
 	const long dptrs_per_blk = NIDS_PER_BLOCK;
-	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
 	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
 	int n = 0;
 	int level = 0;
@@ -1179,8 +1185,14 @@
 		f2fs_put_dnode(&dn);
 		return -EIO;
 	}
-	f2fs_bug_on(F2FS_I_SB(inode),
-			inode->i_blocks != 0 && inode->i_blocks != 8);
+
+	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
+		f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+			"Inconsistent i_blocks, ino:%lu, iblocks:%llu",
+			inode->i_ino,
+			(unsigned long long)inode->i_blocks);
+		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+	}
 
 	/* will put inode & node pages */
 	err = truncate_node(&dn);
@@ -1275,9 +1287,10 @@
 	int err;
 
 	if (PageUptodate(page)) {
-#ifdef CONFIG_F2FS_CHECK_FS
-		f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
-#endif
+		if (!f2fs_inode_chksum_verify(sbi, page)) {
+			ClearPageUptodate(page);
+			return -EBADMSG;
+		}
 		return LOCKED_PAGE;
 	}
 
@@ -1543,7 +1556,8 @@
 	}
 
 	if (__is_valid_data_blkaddr(ni.blk_addr) &&
-		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
+		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
+					DATA_GENERIC_ENHANCE)) {
 		up_read(&sbi->node_write);
 		goto redirty_out;
 	}
@@ -1922,7 +1936,9 @@
 	f2fs_balance_fs_bg(sbi);
 
 	/* collect a number of dirty node pages and write together */
-	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
+	if (wbc->sync_mode != WB_SYNC_ALL &&
+			get_pages(sbi, F2FS_DIRTY_NODES) <
+					nr_pages_to_skip(sbi, NODE))
 		goto skip_write;
 
 	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -1961,7 +1977,7 @@
 	if (!PageDirty(page)) {
 		__set_page_dirty_nobuffers(page);
 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
-		SetPagePrivate(page);
+		f2fs_set_page_private(page, 0);
 		f2fs_trace_pid(page);
 		return 1;
 	}
@@ -2076,6 +2092,9 @@
 	if (unlikely(nid == 0))
 		return false;
 
+	if (unlikely(f2fs_check_nid_range(sbi, nid)))
+		return false;
+
 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
 	i->nid = nid;
 	i->state = FREE_NID;
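
Note: two hardening changes above are related. First, a node page that is already PageUptodate is re-verified against its inode checksum and now fails with -EBADMSG instead of being trusted (previously this was only a debug assertion). Second, free-nid insertion rejects out-of-range nids up front; the range helper behaves roughly like this (paraphrased from f2fs, shown for context):

	static inline int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
	{
		if (unlikely(nid < F2FS_ROOT_INO(sbi) ||
				nid >= NM_I(sbi)->max_nid)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			return -EFAULT;
		}
		return 0;
	}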
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 5a99911..1217501 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -317,8 +317,10 @@
 			break;
 		}
 
-		if (!is_recoverable_dnode(page))
+		if (!is_recoverable_dnode(page)) {
+			f2fs_put_page(page, 1);
 			break;
+		}
 
 		if (!is_fsync_dnode(page))
 			goto next;
@@ -330,8 +332,10 @@
 			if (!check_only &&
 					IS_INODE(page) && is_dent_dnode(page)) {
 				err = f2fs_recover_inode_page(sbi, page);
-				if (err)
+				if (err) {
+					f2fs_put_page(page, 1);
 					break;
+				}
 				quota_inode = true;
 			}
 
@@ -347,6 +351,7 @@
 					err = 0;
 					goto next;
 				}
+				f2fs_put_page(page, 1);
 				break;
 			}
 		}
@@ -362,6 +367,7 @@
 				"%s: detect looped node chain, "
 				"blkaddr:%u, next:%u",
 				__func__, blkaddr, next_blkaddr_of_node(page));
+			f2fs_put_page(page, 1);
 			err = -EINVAL;
 			break;
 		}
@@ -372,7 +378,6 @@
 
 		f2fs_ra_meta_pages_cond(sbi, blkaddr);
 	}
-	f2fs_put_page(page, 1);
 	return err;
 }
 
@@ -538,7 +543,15 @@
 		goto err;
 
 	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
-	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
+
+	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
+			inode->i_ino, ofs_of_node(dn.node_page),
+			ofs_of_node(page));
+		err = -EFAULT;
+		goto err;
+	}
 
 	for (; start < end; start++, dn.ofs_in_node++) {
 		block_t src, dest;
@@ -546,6 +559,18 @@
 		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
 		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);
 
+		if (__is_valid_data_blkaddr(src) &&
+			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
+			err = -EFAULT;
+			goto err;
+		}
+
+		if (__is_valid_data_blkaddr(dest) &&
+			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
+			err = -EFAULT;
+			goto err;
+		}
+
 		/* skip recovering if dest is the same as src */
 		if (src == dest)
 			continue;
@@ -658,8 +683,10 @@
 		 */
 		if (IS_INODE(page)) {
 			err = recover_inode(entry->inode, page);
-			if (err)
+			if (err) {
+				f2fs_put_page(page, 1);
 				break;
+			}
 		}
 		if (entry->last_dentry == blkaddr) {
 			err = recover_dentry(entry->inode, page, dir_list);
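
Note: the recovery.c changes above move page release from a single f2fs_put_page() after the loop into every early exit, so reference ownership stays local to each iteration. The resulting pattern, sketched with hypothetical helpers:

	while (1) {
		page = example_get_page(blkaddr);	/* locked, +1 ref */
		if (example_should_stop(page)) {
			f2fs_put_page(page, 1);		/* release before leaving */
			break;
		}
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);			/* release before next round */
	}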
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index eaf7be5..2a8dc33 100755
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -191,8 +191,7 @@
 
 	f2fs_trace_pid(page);
 
-	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
-	SetPagePrivate(page);
+	f2fs_set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
 
 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
 
@@ -215,7 +214,8 @@
 }
 
 static int __revoke_inmem_pages(struct inode *inode,
-				struct list_head *head, bool drop, bool recover)
+				struct list_head *head, bool drop, bool recover,
+				bool trylock)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct inmem_pages *cur, *tmp;
@@ -227,7 +227,16 @@
 		if (drop)
 			trace_f2fs_commit_inmem_page(page, INMEM_DROP);
 
-		lock_page(page);
+		if (trylock) {
+			/*
+			 * to avoid deadlock in between page lock and
+			 * use trylock to avoid a deadlock between the
+			 * page lock and inmem_lock.
+			if (!trylock_page(page))
+				continue;
+		} else {
+			lock_page(page);
+		}
 
 		f2fs_wait_on_page_writeback(page, DATA, true, true);
 
@@ -270,8 +279,7 @@
 			ClearPageUptodate(page);
 			clear_cold_data(page);
 		}
-		set_page_private(page, 0);
-		ClearPagePrivate(page);
+		f2fs_clear_page_private(page);
 		f2fs_put_page(page, 1);
 
 		list_del(&cur->list);
@@ -318,13 +326,19 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 
-	mutex_lock(&fi->inmem_lock);
-	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
-	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
-	if (!list_empty(&fi->inmem_ilist))
-		list_del_init(&fi->inmem_ilist);
-	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
-	mutex_unlock(&fi->inmem_lock);
+	while (!list_empty(&fi->inmem_pages)) {
+		mutex_lock(&fi->inmem_lock);
+		__revoke_inmem_pages(inode, &fi->inmem_pages,
+						true, false, true);
+
+		if (list_empty(&fi->inmem_pages)) {
+			spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+			if (!list_empty(&fi->inmem_ilist))
+				list_del_init(&fi->inmem_ilist);
+			spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+		}
+		mutex_unlock(&fi->inmem_lock);
+	}
 
 	clear_inode_flag(inode, FI_ATOMIC_FILE);
 	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
@@ -354,8 +368,7 @@
 	kmem_cache_free(inmem_entry_slab, cur);
 
 	ClearPageUptodate(page);
-	set_page_private(page, 0);
-	ClearPagePrivate(page);
+	f2fs_clear_page_private(page);
 	f2fs_put_page(page, 0);
 
 	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
@@ -429,12 +442,15 @@
 		 * recovery or rewrite & commit last transaction. For other
 		 * error number, revoking was done by filesystem itself.
 		 */
-		err = __revoke_inmem_pages(inode, &revoke_list, false, true);
+		err = __revoke_inmem_pages(inode, &revoke_list,
+						false, true, false);
 
 		/* drop all uncommitted pages */
-		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+		__revoke_inmem_pages(inode, &fi->inmem_pages,
+						true, false, false);
 	} else {
-		__revoke_inmem_pages(inode, &revoke_list, false, false);
+		__revoke_inmem_pages(inode, &revoke_list,
+						false, false, false);
 	}
 
 	return err;
@@ -542,9 +558,13 @@
 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
 				struct block_device *bdev)
 {
-	struct bio *bio = f2fs_bio_alloc(sbi, 0, true);
+	struct bio *bio;
 	int ret;
 
+	bio = f2fs_bio_alloc(sbi, 0, false);
+	if (!bio)
+		return -ENOMEM;
+
 	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
 	bio->bi_bdev = bdev;
 	ret = submit_bio_wait(bio);
@@ -560,7 +580,7 @@
 	int ret = 0;
 	int i;
 
-	if (!sbi->s_ndevs)
+	if (!f2fs_is_multi_device(sbi))
 		return __submit_flush_wait(sbi, sbi->sb->s_bdev);
 
 	for (i = 0; i < sbi->s_ndevs; i++) {
@@ -628,7 +648,8 @@
 		return ret;
 	}
 
-	if (atomic_inc_return(&fcc->queued_flush) == 1 || sbi->s_ndevs > 1) {
+	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
+	    f2fs_is_multi_device(sbi)) {
 		ret = submit_flush_wait(sbi, ino);
 		atomic_dec(&fcc->queued_flush);
 
@@ -734,7 +755,7 @@
 {
 	int ret = 0, i;
 
-	if (!sbi->s_ndevs)
+	if (!f2fs_is_multi_device(sbi))
 		return 0;
 
 	for (i = 1; i < sbi->s_ndevs; i++) {
@@ -868,6 +889,9 @@
 
 	if (holes[DATA] > ovp || holes[NODE] > ovp)
 		return -EAGAIN;
+	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
+		dirty_segments(sbi) > overprovision_segments(sbi))
+		return -EAGAIN;
 	return 0;
 }
 
@@ -1036,6 +1060,7 @@
 
 	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
 	dpolicy->io_aware_gran = MAX_PLIST_NUM;
+	dpolicy->timeout = 0;
 
 	if (discard_type == DPOLICY_BG) {
 		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
@@ -1058,6 +1083,8 @@
 	} else if (discard_type == DPOLICY_UMOUNT) {
 		dpolicy->max_requests = UINT_MAX;
 		dpolicy->io_aware = false;
+		/* issue all pending discards so CP_TRIMMED_FLAG can be kept */
+		dpolicy->granularity = 1;
 	}
 }
 
@@ -1337,9 +1364,12 @@
 {
 	block_t lblkstart = blkstart;
 
+	if (!f2fs_bdev_support_discard(bdev))
+		return 0;
+
 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
 
-	if (sbi->s_ndevs) {
+	if (f2fs_is_multi_device(sbi)) {
 		int devi = f2fs_target_device_index(sbi, blkstart);
 
 		blkstart -= FDEV(devi).start_blk;
@@ -1420,7 +1450,14 @@
 	int i, issued = 0;
 	bool io_interrupted = false;
 
+	if (dpolicy->timeout != 0)
+		f2fs_update_time(sbi, dpolicy->timeout);
+
 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
+		if (dpolicy->timeout != 0 &&
+				f2fs_time_over(sbi, dpolicy->timeout))
+			break;
+
 		if (i + 1 < dpolicy->granularity)
 			break;
 
@@ -1607,7 +1644,7 @@
 }
 
 /* This comes from f2fs_put_super */
-bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
+bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
 {
 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
 	struct discard_policy dpolicy;
@@ -1615,6 +1652,7 @@
 
 	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
 					dcc->discard_granularity);
+	dpolicy.timeout = UMOUNT_DISCARD_TIMEOUT;
 	__issue_discard_cmd(sbi, &dpolicy);
 	dropped = __drop_discard_cmd(sbi);
 
@@ -1694,42 +1732,36 @@
 	block_t lblkstart = blkstart;
 	int devi = 0;
 
-	if (sbi->s_ndevs) {
+	if (f2fs_is_multi_device(sbi)) {
 		devi = f2fs_target_device_index(sbi, blkstart);
+		if (blkstart < FDEV(devi).start_blk ||
+		    blkstart > FDEV(devi).end_blk) {
+			f2fs_msg(sbi->sb, KERN_ERR, "Invalid block %x",
+				 blkstart);
+			return -EIO;
+		}
 		blkstart -= FDEV(devi).start_blk;
 	}
 
-	/*
-	 * We need to know the type of the zone: for conventional zones,
-	 * use regular discard if the drive supports it. For sequential
-	 * zones, reset the zone write pointer.
-	 */
-	switch (get_blkz_type(sbi, bdev, blkstart)) {
-
-	case BLK_ZONE_TYPE_CONVENTIONAL:
-		if (!blk_queue_discard(bdev_get_queue(bdev)))
-			return 0;
-		return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
-	case BLK_ZONE_TYPE_SEQWRITE_REQ:
-	case BLK_ZONE_TYPE_SEQWRITE_PREF:
+	/* For sequential zones, reset the zone write pointer */
+	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
 		sector = SECTOR_FROM_BLOCK(blkstart);
 		nr_sects = SECTOR_FROM_BLOCK(blklen);
 
 		if (sector & (bdev_zone_sectors(bdev) - 1) ||
 				nr_sects != bdev_zone_sectors(bdev)) {
-			f2fs_msg(sbi->sb, KERN_INFO,
-				"(%d) %s: Unaligned discard attempted (block %x + %x)",
+			f2fs_msg(sbi->sb, KERN_ERR,
+				"(%d) %s: Unaligned zone reset attempted (block %x + %x)",
 				devi, sbi->s_ndevs ? FDEV(devi).path: "",
 				blkstart, blklen);
 			return -EIO;
 		}
 		trace_f2fs_issue_reset_zone(bdev, blkstart);
-		return blkdev_reset_zones(bdev, sector,
-					  nr_sects, GFP_NOFS);
-	default:
-		/* Unknown zone type: broken device ? */
-		return -EIO;
+		return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS);
 	}
+
+	/* For conventional zones, use regular discard if supported */
+	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
 }
 #endif
 
@@ -1737,8 +1769,7 @@
 		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
 #ifdef CONFIG_BLK_DEV_ZONED
-	if (f2fs_sb_has_blkzoned(sbi) &&
-				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
+	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
 #endif
 	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
@@ -2134,8 +2165,11 @@
 			 * before, we must track that to know how much space we
 			 * really have.
 			 */
-			if (f2fs_test_bit(offset, se->ckpt_valid_map))
+			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
+				spin_lock(&sbi->stat_lock);
 				sbi->unusable_block_count++;
+				spin_unlock(&sbi->stat_lock);
+			}
 		}
 
 		if (f2fs_test_and_clear_bit(offset, se->discard_map))
@@ -2182,7 +2216,7 @@
 	struct seg_entry *se;
 	bool is_cp = false;
 
-	if (!is_valid_data_blkaddr(sbi, blkaddr))
+	if (!__is_valid_data_blkaddr(blkaddr))
 		return true;
 
 	down_read(&sit_i->sentry_lock);
@@ -3052,7 +3086,7 @@
 	struct f2fs_sb_info *sbi = fio->sbi;
 	unsigned int devidx;
 
-	if (!sbi->s_ndevs)
+	if (!f2fs_is_multi_device(sbi))
 		return;
 
 	devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
@@ -3150,21 +3184,26 @@
 {
 	int err;
 	struct f2fs_sb_info *sbi = fio->sbi;
+	unsigned int segno;
 
 	fio->new_blkaddr = fio->old_blkaddr;
 	/* i/o temperature is needed for passing down write hints */
 	__get_segment_type(fio);
 
-	f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi,
-			GET_SEGNO(sbi, fio->new_blkaddr))->type));
+	segno = GET_SEGNO(sbi, fio->new_blkaddr);
+
+	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		return -EFAULT;
+	}
 
 	stat_inc_inplace_blocks(fio->sbi);
 
 	err = f2fs_submit_page_bio(fio);
-	if (!err)
+	if (!err) {
 		update_device_state(fio);
-
-	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
+		f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
+	}
 
 	return err;
 }
@@ -3299,7 +3338,7 @@
 	if (!f2fs_post_read_required(inode))
 		return;
 
-	if (!is_valid_data_blkaddr(sbi, blkaddr))
+	if (!__is_valid_data_blkaddr(blkaddr))
 		return;
 
 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
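
Note: the discard timeout added above rides on f2fs's generic interval-time table -- dpolicy->timeout names a slot (UMOUNT_DISCARD_TIMEOUT, default 5 seconds per the f2fs.h hunk). The two helpers it leans on look roughly like this (paraphrased from f2fs.h):

	static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
	{
		sbi->last_time[type] = jiffies;		/* stamp the start */
	}

	static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
	{
		unsigned long interval = sbi->interval_time[type] * HZ;

		return time_after(jiffies, sbi->last_time[type] + interval);
	}

__issue_discard_cmd() stamps the start once, then abandons the pend-list walk when the budget expires, which is how f2fs_issue_discard_timeout() bounds how long umount can stall on queued discards.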
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index a77f76f..429007b 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -82,7 +82,7 @@
 	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
 
 #define GET_SEGNO(sbi, blk_addr)					\
-	((!is_valid_data_blkaddr(sbi, blk_addr)) ?			\
+	((!__is_valid_data_blkaddr(blk_addr)) ?			\
 	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
 		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
 #define BLKS_PER_SEC(sbi)					\
@@ -656,14 +656,15 @@
 	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
 }
 
-static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
+static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
 {
 	struct f2fs_sb_info *sbi = fio->sbi;
 
-	if (__is_meta_io(fio))
-		verify_blkaddr(sbi, blk_addr, META_GENERIC);
-	else
-		verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
+	if (__is_valid_data_blkaddr(fio->old_blkaddr))
+		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
+					META_GENERIC : DATA_GENERIC);
+	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
+					META_GENERIC : DATA_GENERIC_ENHANCE);
 }
 
 /*
@@ -672,7 +673,6 @@
 static inline int check_block_count(struct f2fs_sb_info *sbi,
 		int segno, struct f2fs_sit_entry *raw_sit)
 {
-#ifdef CONFIG_F2FS_CHECK_FS
 	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
 	int valid_blocks = 0;
 	int cur_pos = 0, next_pos;
@@ -699,7 +699,7 @@
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		return -EINVAL;
 	}
-#endif
+
 	/* check segment usage, and check boundary of a given segment number */
 	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
 					|| segno > TOTAL_SEGS(sbi) - 1)) {
@@ -865,7 +865,7 @@
 		}
 	}
 	mutex_unlock(&dcc->cmd_lock);
-	if (!wakeup)
+	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
 		return;
 wake_up:
 	dcc->discard_wake = 1;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 3e0250b..a6de02a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -269,7 +269,7 @@
 	if (!qname) {
 		f2fs_msg(sb, KERN_ERR,
 			"Not enough memory for storing quotafile name");
-		return -EINVAL;
+		return -ENOMEM;
 	}
 	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
 		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
@@ -586,7 +586,7 @@
 		case Opt_io_size_bits:
 			if (args->from && match_int(args, &arg))
 				return -EINVAL;
-			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
+			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
 				f2fs_msg(sb, KERN_WARNING,
 					"Not support %d, larger than %d",
 					1 << arg, BIO_MAX_PAGES);
@@ -757,7 +757,7 @@
 			kvfree(name);
 			break;
 		case Opt_test_dummy_encryption:
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 			if (!f2fs_sb_has_encrypt(sbi)) {
 				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
 				return -EINVAL;
@@ -821,6 +821,8 @@
 	}
 
 	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
+		int min_size, max_size;
+
 		if (!f2fs_sb_has_extra_attr(sbi) ||
 			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
 			f2fs_msg(sb, KERN_ERR,
@@ -834,14 +836,15 @@
 					"set with inline_xattr option");
 			return -EINVAL;
 		}
-		if (!F2FS_OPTION(sbi).inline_xattr_size ||
-			F2FS_OPTION(sbi).inline_xattr_size >=
-					DEF_ADDRS_PER_INODE -
-					F2FS_TOTAL_EXTRA_ATTR_SIZE -
-					DEF_INLINE_RESERVED_SIZE -
-					DEF_MIN_INLINE_SIZE) {
+
+		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
+		max_size = MAX_INLINE_XATTR_SIZE;
+
+		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
+				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
 			f2fs_msg(sb, KERN_ERR,
-					"inline xattr size is out of range");
+				"inline xattr size is out of range: %d ~ %d",
+				min_size, max_size);
 			return -EINVAL;
 		}
 	}
@@ -915,6 +918,10 @@
 			sb_start_intwrite(inode->i_sb);
 			f2fs_i_size_write(inode, 0);
 
+			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
+					inode, NULL, 0, DATA);
+			truncate_inode_pages_final(inode->i_mapping);
+
 			if (F2FS_HAS_BLOCKS(inode))
 				f2fs_truncate(inode);
 
@@ -1017,7 +1024,7 @@
 	for (i = 0; i < sbi->s_ndevs; i++) {
 		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
 #ifdef CONFIG_BLK_DEV_ZONED
-		kvfree(FDEV(i).blkz_type);
+		kvfree(FDEV(i).blkz_seq);
 #endif
 	}
 	kvfree(sbi->devs);
@@ -1048,7 +1055,7 @@
 	}
 
 	/* be sure to wait for any on-going discard commands */
-	dropped = f2fs_wait_discard_bios(sbi);
+	dropped = f2fs_issue_discard_timeout(sbi);
 
 	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
 					!sbi->discard_blks && !dropped) {
@@ -1219,10 +1226,13 @@
 	buf->f_blocks = total_count - start_count;
 	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
 						sbi->current_reserved_blocks;
+
+	spin_lock(&sbi->stat_lock);
 	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
 		buf->f_bfree = 0;
 	else
 		buf->f_bfree -= sbi->unusable_block_count;
+	spin_unlock(&sbi->stat_lock);
 
 	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
 		buf->f_bavail = buf->f_bfree -
@@ -1393,7 +1403,7 @@
 		seq_printf(seq, ",whint_mode=%s", "user-based");
 	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
 		seq_printf(seq, ",whint_mode=%s", "fs-based");
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	if (F2FS_OPTION(sbi).test_dummy_encryption)
 		seq_puts(seq, ",test_dummy_encryption");
 #endif
@@ -1458,9 +1468,16 @@
 
 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 {
+	unsigned int s_flags = sbi->sb->s_flags;
 	struct cp_control cpc;
-	int err;
+	int err = 0;
+	int ret;
 
+	if (s_flags & MS_RDONLY) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+				"checkpoint=disable on readonly fs");
+		return -EINVAL;
+	}
 	sbi->sb->s_flags |= MS_ACTIVE;
 
 	f2fs_update_time(sbi, DISABLE_TIME);
@@ -1468,27 +1485,41 @@
 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
 		mutex_lock(&sbi->gc_mutex);
 		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
-		if (err == -ENODATA)
+		if (err == -ENODATA) {
+			err = 0;
 			break;
+		}
 		if (err && err != -EAGAIN)
-			return err;
+			break;
 	}
 
-	err = sync_filesystem(sbi->sb);
-	if (err)
-		return err;
+	ret = sync_filesystem(sbi->sb);
+	if (ret || err) {
+		err = ret ? ret : err;
+		goto restore_flag;
+	}
 
-	if (f2fs_disable_cp_again(sbi))
-		return -EAGAIN;
+	if (f2fs_disable_cp_again(sbi)) {
+		err = -EAGAIN;
+		goto restore_flag;
+	}
 
 	mutex_lock(&sbi->gc_mutex);
 	cpc.reason = CP_PAUSE;
 	set_sbi_flag(sbi, SBI_CP_DISABLED);
-	f2fs_write_checkpoint(sbi, &cpc);
+	err = f2fs_write_checkpoint(sbi, &cpc);
+	if (err)
+		goto out_unlock;
 
+	spin_lock(&sbi->stat_lock);
 	sbi->unusable_block_count = 0;
+	spin_unlock(&sbi->stat_lock);
+
+out_unlock:
 	mutex_unlock(&sbi->gc_mutex);
-	return 0;
+restore_flag:
+	sbi->sb->s_flags = s_flags;	/* Restore MS_RDONLY status */
+	return err;
 }
 
 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
@@ -2026,6 +2057,12 @@
 			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
 		}
 	}
+	/*
+	 * In case of checkpoint=disable, we must flush quota blocks here;
+	 * otherwise they can be written back after put_super has dropped
+	 * node_inode, causing a NULL dereference in end_io.
+	 */
+	sync_filesystem(sb);
 }
 
 static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
@@ -2157,7 +2194,7 @@
 	.remount_fs	= f2fs_remount,
 };
 
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
 {
 	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
@@ -2248,7 +2285,7 @@
 static loff_t max_file_blocks(void)
 {
 	loff_t result = 0;
-	loff_t leaf_count = ADDRS_PER_BLOCK;
+	loff_t leaf_count = DEF_ADDRS_PER_BLOCK;
 
 	/*
 	 * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
@@ -2426,7 +2463,7 @@
 	/* Currently, support only 4KB page cache size */
 	if (F2FS_BLKSIZE != PAGE_SIZE) {
 		f2fs_msg(sb, KERN_INFO,
-			"Invalid page_cache_size (%lu), supports only 4KB\n",
+			"Invalid page_cache_size (%lu), supports only 4KB",
 			PAGE_SIZE);
 		return 1;
 	}
@@ -2435,7 +2472,7 @@
 	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
 	if (blocksize != F2FS_BLKSIZE) {
 		f2fs_msg(sb, KERN_INFO,
-			"Invalid blocksize (%u), supports only 4KB\n",
+			"Invalid blocksize (%u), supports only 4KB",
 			blocksize);
 		return 1;
 	}
@@ -2443,7 +2480,7 @@
 	/* check log blocks per segment */
 	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
 		f2fs_msg(sb, KERN_INFO,
-			"Invalid log blocks per segment (%u)\n",
+			"Invalid log blocks per segment (%u)",
 			le32_to_cpu(raw_super->log_blocks_per_seg));
 		return 1;
 	}
@@ -2564,7 +2601,8 @@
 	unsigned int log_blocks_per_seg;
 	unsigned int segment_count_main;
 	unsigned int cp_pack_start_sum, cp_payload;
-	block_t user_block_count;
+	block_t user_block_count, valid_user_blocks;
+	block_t avail_node_count, valid_node_count;
 	int i, j;
 
 	total = le32_to_cpu(raw_super->segment_count);
@@ -2599,6 +2637,24 @@
 		return 1;
 	}
 
+	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
+	if (valid_user_blocks > user_block_count) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong valid_user_blocks: %u, user_block_count: %u",
+			valid_user_blocks, user_block_count);
+		return 1;
+	}
+
+	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
+	avail_node_count = sbi->total_node_count - sbi->nquota_files -
+						F2FS_RESERVED_NODE_NUM;
+	if (valid_node_count > avail_node_count) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong valid_node_count: %u, avail_node_count: %u",
+			valid_node_count, avail_node_count);
+		return 1;
+	}
+
 	main_segs = le32_to_cpu(raw_super->segment_count_main);
 	blocks_per_seg = sbi->blocks_per_seg;
 
@@ -2706,6 +2762,8 @@
 	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
 	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
 	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
+	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
+				DEF_UMOUNT_DISCARD_TIMEOUT;
 	clear_sbi_flag(sbi, SBI_NEED_FSCK);
 
 	for (i = 0; i < NR_COUNT_TYPE; i++)
@@ -2768,9 +2826,11 @@
 	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
 		FDEV(devi).nr_blkz++;
 
-	FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
-								GFP_KERNEL);
-	if (!FDEV(devi).blkz_type)
+	FDEV(devi).blkz_seq = f2fs_kzalloc(sbi,
+					BITS_TO_LONGS(FDEV(devi).nr_blkz)
+					* sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!FDEV(devi).blkz_seq)
 		return -ENOMEM;
 
 #define F2FS_REPORT_NR_ZONES   4096
@@ -2797,7 +2857,8 @@
 		}
 
 		for (i = 0; i < nr_zones; i++) {
-			FDEV(devi).blkz_type[n] = zones[i].type;
+			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
+				set_bit(n, FDEV(devi).blkz_seq);
 			sector += zones[i].len;
 			n++;
 		}
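The hunk above replaces the per-zone type array with a bitmap in which a set bit marks a non-conventional (sequential-only) zone, sized with BITS_TO_LONGS(). A small userspace sketch of the same bookkeeping (the zone counts and helpers are illustrative, not the kernel API):

	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
	#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	static void set_bit_ul(unsigned int nr, unsigned long *map)
	{
		map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
	}

	static int test_bit_ul(unsigned int nr, const unsigned long *map)
	{
		return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
	}

	int main(void)
	{
		unsigned int nr_zones = 200;	/* hypothetical zone count */
		unsigned long *seq = calloc(BITS_TO_LONGS(nr_zones), sizeof(*seq));

		if (!seq)
			return 1;
		/* pretend zones 100..199 are sequential-only zones */
		for (unsigned int z = 100; z < nr_zones; z++)
			set_bit_ul(z, seq);

		printf("zone 42 sequential? %d\n", test_bit_ul(42, seq));	/* 0 */
		printf("zone 150 sequential? %d\n", test_bit_ul(150, seq));	/* 1 */
		free(seq);
		return 0;
	}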
@@ -3029,10 +3090,11 @@
 	struct f2fs_super_block *raw_super;
 	struct inode *root;
 	int err;
-	bool retry = true, need_fsck = false;
+	bool skip_recovery = false, need_fsck = false;
 	char *options = NULL;
 	int recovery, i, valid_super_block;
 	struct curseg_info *seg_i;
+	int retry_cnt = 1;
 
 try_onemore:
 	err = -EINVAL;
@@ -3083,7 +3145,7 @@
 #ifndef CONFIG_BLK_DEV_ZONED
 	if (f2fs_sb_has_blkzoned(sbi)) {
 		f2fs_msg(sb, KERN_ERR,
-			 "Zoned block device support is not enabled\n");
+			 "Zoned block device support is not enabled");
 		err = -EOPNOTSUPP;
 		goto free_sb_buf;
 	}
@@ -3104,7 +3166,6 @@
 	sb->s_maxbytes = sbi->max_file_blocks <<
 				le32_to_cpu(raw_super->log_blocksize);
 	sb->s_max_links = F2FS_LINK_MAX;
-	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 
 #ifdef CONFIG_QUOTA
 	sb->dq_op = &f2fs_quota_operations;
@@ -3123,7 +3184,7 @@
 #endif
 
 	sb->s_op = &f2fs_sops;
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	sb->s_cop = &f2fs_cryptops;
 #endif
 	sb->s_xattr = f2fs_xattr_handlers;
@@ -3207,6 +3268,10 @@
 
 	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
+		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
+		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
+	}
 
 	/* Initialize device list */
 	err = f2fs_scan_devices(sbi);
@@ -3294,7 +3359,7 @@
 	sb->s_root = d_make_root(root); /* allocate root dentry */
 	if (!sb->s_root) {
 		err = -ENOMEM;
-		goto free_root_inode;
+		goto free_node_inode;
 	}
 
 	err = f2fs_register_sysfs(sbi);
@@ -3316,7 +3381,7 @@
 		goto free_meta;
 
 	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
-		goto skip_recovery;
+		goto reset_checkpoint;
 
 	/* recover fsynced data */
 	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
@@ -3324,20 +3389,29 @@
 		 * mount should be failed, when device has readonly mode, and
 		 * previous checkpoint was not done by clean system shutdown.
 		 */
-		if (bdev_read_only(sb->s_bdev) &&
-				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
-			err = -EROFS;
-			goto free_meta;
+		if (f2fs_hw_is_readonly(sbi)) {
+			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+				err = -EROFS;
+				f2fs_msg(sb, KERN_ERR,
+					"Need to recover fsync data, but "
+					"write access unavailable");
+				goto free_meta;
+			}
+			f2fs_msg(sbi->sb, KERN_INFO, "write access "
+				"unavailable, skipping recovery");
+			goto reset_checkpoint;
 		}
 
 		if (need_fsck)
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
 
-		if (!retry)
-			goto skip_recovery;
+		if (skip_recovery)
+			goto reset_checkpoint;
 
 		err = f2fs_recover_fsync_data(sbi, false);
 		if (err < 0) {
+			if (err != -ENOMEM)
+				skip_recovery = true;
 			need_fsck = true;
 			f2fs_msg(sb, KERN_ERR,
 				"Cannot recover all fsync data errno=%d", err);
@@ -3353,14 +3427,14 @@
 			goto free_meta;
 		}
 	}
-skip_recovery:
+reset_checkpoint:
 	/* f2fs_recover_fsync_data() cleared this already */
 	clear_sbi_flag(sbi, SBI_POR_DOING);
 
 	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
 		err = f2fs_disable_checkpoint(sbi);
 		if (err)
-			goto free_meta;
+			goto sync_free_meta;
 	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
 		f2fs_enable_checkpoint(sbi);
 	}
@@ -3373,7 +3447,7 @@
 		/* After POR, we can run background GC thread.*/
 		err = f2fs_start_gc_thread(sbi);
 		if (err)
-			goto free_meta;
+			goto sync_free_meta;
 	}
 	kvfree(options);
 
@@ -3393,8 +3467,14 @@
 				cur_cp_version(F2FS_CKPT(sbi)));
 	f2fs_update_time(sbi, CP_TIME);
 	f2fs_update_time(sbi, REQ_TIME);
+	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
 	return 0;
 
+sync_free_meta:
+	/* safe to flush all the data */
+	sync_filesystem(sbi->sb);
+	retry_cnt = 0;
+
 free_meta:
 #ifdef CONFIG_QUOTA
 	f2fs_truncate_quota_inode_pages(sb);
@@ -3408,6 +3488,8 @@
 	 * falls into an infinite loop in f2fs_sync_meta_pages().
 	 */
 	truncate_inode_pages_final(META_MAPPING(sbi));
+	/* evict some inodes being cached by GC */
+	evict_inodes(sb);
 	f2fs_unregister_sysfs(sbi);
 free_root_inode:
 	dput(sb->s_root);
@@ -3451,8 +3533,8 @@
 	kvfree(sbi);
 
 	/* give only one another chance */
-	if (retry) {
-		retry = false;
+	if (retry_cnt > 0 && skip_recovery) {
+		retry_cnt--;
 		shrink_dcache_sb(sb);
 		goto try_onemore;
 	}
@@ -3553,9 +3635,7 @@
 	err = register_filesystem(&f2fs_fs_type);
 	if (err)
 		goto free_shrinker;
-	err = f2fs_create_root_stats();
-	if (err)
-		goto free_filesystem;
+	f2fs_create_root_stats();
 	err = f2fs_init_post_read_processing();
 	if (err)
 		goto free_root_stats;
@@ -3563,7 +3643,6 @@
 
 free_root_stats:
 	f2fs_destroy_root_stats();
-free_filesystem:
 	unregister_filesystem(&f2fs_fs_type);
 free_shrinker:
 	unregister_shrinker(&f2fs_shrinker_info);
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 948c1a2..676e3cf 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -222,6 +222,8 @@
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
 		return -EINVAL;
+	if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
+		return -EINVAL;
 #endif
 	if (a->struct_type == RESERVED_BLOCKS) {
 		spin_lock(&sbi->stat_lock);
@@ -278,10 +280,16 @@
 		return count;
 	}
 
-	*ui = t;
 
-	if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
-		f2fs_reset_iostat(sbi);
+	if (!strcmp(a->attr.name, "iostat_enable")) {
+		sbi->iostat_enable = !!t;
+		if (!sbi->iostat_enable)
+			f2fs_reset_iostat(sbi);
+		return count;
+	}
+
+	*ui = (unsigned int)t;
+
 	return count;
 }
 
@@ -418,6 +426,8 @@
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, discard_idle_interval,
 					interval_time[DISCARD_TIME]);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info,
+		umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
@@ -431,7 +441,7 @@
 F2FS_GENERAL_RO_ATTR(features);
 F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
 
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
 #endif
 #ifdef CONFIG_BLK_DEV_ZONED
@@ -475,6 +485,7 @@
 	ATTR_LIST(idle_interval),
 	ATTR_LIST(discard_idle_interval),
 	ATTR_LIST(gc_idle_interval),
+	ATTR_LIST(umount_discard_timeout),
 	ATTR_LIST(iostat_enable),
 	ATTR_LIST(readdir_ra),
 	ATTR_LIST(gc_pin_file_thresh),
@@ -492,7 +503,7 @@
 };
 
 static struct attribute *f2fs_feat_attrs[] = {
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#ifdef CONFIG_FS_ENCRYPTION
 	ATTR_LIST(encryption),
 #endif
 #ifdef CONFIG_BLK_DEV_ZONED
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index ce2a5eb..d0ab533 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -14,7 +14,7 @@
 #include "trace.h"
 
 static RADIX_TREE(pids, GFP_ATOMIC);
-static struct mutex pids_lock;
+static spinlock_t pids_lock;
 static struct last_io_info last_io;
 
 static inline void __print_last_io(void)
@@ -58,23 +58,29 @@
 
 	set_page_private(page, (unsigned long)pid);
 
+retry:
 	if (radix_tree_preload(GFP_NOFS))
 		return;
 
-	mutex_lock(&pids_lock);
+	spin_lock(&pids_lock);
 	p = radix_tree_lookup(&pids, pid);
 	if (p == current)
 		goto out;
 	if (p)
 		radix_tree_delete(&pids, pid);
 
-	f2fs_radix_tree_insert(&pids, pid, current);
+	if (radix_tree_insert(&pids, pid, current)) {
+		spin_unlock(&pids_lock);
+		radix_tree_preload_end();
+		cond_resched();
+		goto retry;
+	}
 
 	trace_printk("%3x:%3x %4x %-16s\n",
 			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
 			pid, current->comm);
 out:
-	mutex_unlock(&pids_lock);
+	spin_unlock(&pids_lock);
 	radix_tree_preload_end();
 }
 
@@ -119,7 +125,7 @@
 
 void f2fs_build_trace_ios(void)
 {
-	mutex_init(&pids_lock);
+	spin_lock_init(&pids_lock);
 }
 
 #define PIDVEC_SIZE	128
@@ -147,7 +153,7 @@
 	pid_t next_pid = 0;
 	unsigned int found;
 
-	mutex_lock(&pids_lock);
+	spin_lock(&pids_lock);
 	while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
 		unsigned idx;
 
@@ -155,5 +161,5 @@
 		for (idx = 0; idx < found; idx++)
 			radix_tree_delete(&pids, pid[idx]);
 	}
-	mutex_unlock(&pids_lock);
+	spin_unlock(&pids_lock);
 }
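The trace.c change above swaps the mutex for a spinlock, which in turn forces the radix-tree insertion into the preload-outside-the-lock, retry-on-failure pattern. A rough userspace analogue of that pattern, using a pthread spinlock and a toy growable table in place of the radix tree (compile with -lpthread):

	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* toy table standing in for the radix tree; insert fails when full */
	struct table { int *slots; int cap, used; };

	static pthread_spinlock_t lock;

	static int table_insert(struct table *t, int val)
	{
		if (t->used == t->cap)
			return -1;	/* no room: must not allocate under the lock */
		t->slots[t->used++] = val;
		return 0;
	}

	/* grow the table outside the lock, like radix_tree_preload() */
	static int table_preload(struct table *t)
	{
		int ncap = t->cap ? t->cap * 2 : 4;
		int *n = realloc(t->slots, ncap * sizeof(*n));

		if (!n)
			return -1;
		t->slots = n;
		t->cap = ncap;
		return 0;
	}

	static void insert_retry(struct table *t, int val)
	{
	retry:
		if (table_preload(t))		/* allocate before spinning */
			return;
		pthread_spin_lock(&lock);
		if (table_insert(t, val)) {	/* raced or still full: back off */
			pthread_spin_unlock(&lock);
			sched_yield();		/* stand-in for cond_resched() */
			goto retry;
		}
		pthread_spin_unlock(&lock);
	}

	int main(void)
	{
		struct table t = { 0 };

		pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
		for (int i = 0; i < 10; i++)
			insert_retry(&t, i);
		printf("inserted %d entries\n", t.used);
		free(t.slots);
		return 0;
	}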
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 18d5ffb..e791741 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -202,12 +202,17 @@
 	return handler;
 }
 
-static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
-					size_t len, const char *name)
+static struct f2fs_xattr_entry *__find_xattr(void *base_addr,
+				void *last_base_addr, int index,
+				size_t len, const char *name)
 {
 	struct f2fs_xattr_entry *entry;
 
 	list_for_each_xattr(entry, base_addr) {
+		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+			(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr)
+			return NULL;
+
 		if (entry->e_name_index != index)
 			continue;
 		if (entry->e_name_len != len)
@@ -224,11 +229,11 @@
 {
 	struct f2fs_xattr_entry *entry;
 	unsigned int inline_size = inline_xattr_size(inode);
+	void *max_addr = base_addr + inline_size;
 
 	list_for_each_xattr(entry, base_addr) {
-		if ((void *)entry + sizeof(__u32) > base_addr + inline_size ||
-			(void *)XATTR_NEXT_ENTRY(entry) + sizeof(__u32) >
-			base_addr + inline_size) {
+		if ((void *)entry + sizeof(__u32) > max_addr ||
+			(void *)XATTR_NEXT_ENTRY(entry) > max_addr) {
 			*last_addr = entry;
 			return NULL;
 		}
@@ -239,6 +244,13 @@
 		if (!memcmp(entry->e_name, name, len))
 			break;
 	}
+
+	/* inline xattr header or entry crosses the max inline xattr size */
+	if (IS_XATTR_LAST_ENTRY(entry) &&
+		(void *)entry + sizeof(__u32) > max_addr) {
+		*last_addr = entry;
+		return NULL;
+	}
 	return entry;
 }
 
@@ -290,20 +302,22 @@
 				const char *name, struct f2fs_xattr_entry **xe,
 				void **base_addr, int *base_size)
 {
-	void *cur_addr, *txattr_addr, *last_addr = NULL;
+	void *cur_addr, *txattr_addr, *last_txattr_addr;
+	void *last_addr = NULL;
 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
-	unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0;
 	unsigned int inline_size = inline_xattr_size(inode);
 	int err = 0;
 
-	if (!size && !inline_size)
+	if (!xnid && !inline_size)
 		return -ENODATA;
 
-	*base_size = inline_size + size + XATTR_PADDING_SIZE;
+	*base_size = XATTR_SIZE(xnid, inode) + XATTR_PADDING_SIZE;
 	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
 	if (!txattr_addr)
 		return -ENOMEM;
 
+	last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(xnid, inode);
+
 	/* read from inline xattr */
 	if (inline_size) {
 		err = read_inline_xattr(inode, ipage, txattr_addr);
@@ -330,7 +344,11 @@
 	else
 		cur_addr = txattr_addr;
 
-	*xe = __find_xattr(cur_addr, index, len, name);
+	*xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
+	if (!*xe) {
+		err = -EFAULT;
+		goto out;
+	}
 check:
 	if (IS_XATTR_LAST_ENTRY(*xe)) {
 		err = -ENODATA;
@@ -340,7 +358,7 @@
 	*base_addr = txattr_addr;
 	return 0;
 out:
-	kzfree(txattr_addr);
+	kvfree(txattr_addr);
 	return err;
 }
 
@@ -383,7 +401,7 @@
 	*base_addr = txattr_addr;
 	return 0;
 fail:
-	kzfree(txattr_addr);
+	kvfree(txattr_addr);
 	return err;
 }
 
@@ -510,7 +528,7 @@
 	}
 	error = size;
 out:
-	kzfree(base_addr);
+	kvfree(base_addr);
 	return error;
 }
 
@@ -538,7 +556,7 @@
 		if (!handler || (handler->list && !handler->list(dentry)))
 			continue;
 
-		prefix = handler->prefix ?: handler->name;
+		prefix = xattr_prefix(handler);
 		prefix_len = strlen(prefix);
 		size = prefix_len + entry->e_name_len + 1;
 		if (buffer) {
@@ -556,7 +574,7 @@
 	}
 	error = buffer_size - rest;
 cleanup:
-	kzfree(base_addr);
+	kvfree(base_addr);
 	return error;
 }
 
@@ -574,7 +592,8 @@
 			struct page *ipage, int flags)
 {
 	struct f2fs_xattr_entry *here, *last;
-	void *base_addr;
+	void *base_addr, *last_base_addr;
+	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
 	int found, newsize;
 	size_t len;
 	__u32 new_hsize;
@@ -598,8 +617,14 @@
 	if (error)
 		return error;
 
+	last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+
 	/* find entry with wanted name. */
-	here = __find_xattr(base_addr, index, len, name);
+	here = __find_xattr(base_addr, last_base_addr, index, len, name);
+	if (!here) {
+		error = -EFAULT;
+		goto exit;
+	}
 
 	found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;
 
@@ -687,7 +712,7 @@
 	if (!error && S_ISDIR(inode->i_mode))
 		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
 exit:
-	kzfree(base_addr);
+	kvfree(base_addr);
 	return error;
 }
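__find_xattr() above now takes the end of the buffer and returns NULL as soon as an entry header, or the step to the next entry, would cross it, so a corrupted on-disk list becomes -EFAULT instead of an out-of-bounds walk. A self-contained sketch of that bounded walk over a hypothetical variable-length entry format:

	#include <stdio.h>
	#include <string.h>

	/* hypothetical variable-length entry: a length byte, then the name */
	struct entry {
		unsigned char name_len;
		char name[];
	};

	#define ENTRY_SIZE(e)	(sizeof(struct entry) + (e)->name_len)
	#define NEXT_ENTRY(e)	((struct entry *)((char *)(e) + ENTRY_SIZE(e)))

	/* walk [base, end); refuse to read anything that crosses 'end' */
	static struct entry *find_entry(void *base, void *end, const char *name)
	{
		struct entry *e;

		for (e = base; ; e = NEXT_ENTRY(e)) {
			if ((char *)e + sizeof(*e) > (char *)end)
				return NULL;	/* header itself is out of bounds */
			if (!e->name_len)
				return e;	/* terminator: not found */
			if ((char *)NEXT_ENTRY(e) > (char *)end)
				return NULL;	/* entry body crosses the end */
			if (e->name_len == strlen(name) &&
			    !memcmp(e->name, name, e->name_len))
				return e;
		}
	}

	int main(void)
	{
		unsigned char buf[32] = { 0 };
		struct entry *e = (struct entry *)buf;

		e->name_len = 3;
		memcpy(e->name, "foo", 3);	/* one entry, then a zero terminator */

		printf("found=%p\n", (void *)find_entry(buf, buf + sizeof(buf), "foo"));
		printf("corrupt=%p\n", (void *)find_entry(buf, buf + 2, "foo"));
		return 0;
	}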
 
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 67db134..a90920e 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -71,6 +71,8 @@
 				entry = XATTR_NEXT_ENTRY(entry))
 #define VALID_XATTR_BLOCK_SIZE	(PAGE_SIZE - sizeof(struct node_footer))
 #define XATTR_PADDING_SIZE	(sizeof(__u32))
+#define XATTR_SIZE(x, i)	(((x) ? VALID_XATTR_BLOCK_SIZE : 0) +	\
+						(inline_xattr_size(i)))
 #define MIN_OFFSET(i)		XATTR_ALIGN(inline_xattr_size(i) +	\
 						VALID_XATTR_BLOCK_SIZE)
 
@@ -78,6 +80,12 @@
 				sizeof(struct f2fs_xattr_header) -	\
 				sizeof(struct f2fs_xattr_entry))
 
+#define MAX_INLINE_XATTR_SIZE						\
+			(DEF_ADDRS_PER_INODE -				\
+			F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) -	\
+			DEF_INLINE_RESERVED_SIZE -			\
+			MIN_INLINE_DENTRY_SIZE / sizeof(__le32))
+
 /*
  * On-disk structure of f2fs_xattr
  * We use inline xattrs space + 1 block for xattr.
diff --git a/fs/file.c b/fs/file.c
index 69d6990..09aac4d 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -475,6 +475,7 @@
 		.full_fds_bits	= init_files.full_fds_bits_init,
 	},
 	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
+	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
 };
 
 static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 17ad41d..6faf93b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -331,11 +331,22 @@
 	struct work_struct	work;
 };
 
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+	down_write(&bdi->wb_switch_rwsem);
+}
+
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+	up_write(&bdi->wb_switch_rwsem);
+}
+
 static void inode_switch_wbs_work_fn(struct work_struct *work)
 {
 	struct inode_switch_wbs_context *isw =
 		container_of(work, struct inode_switch_wbs_context, work);
 	struct inode *inode = isw->inode;
+	struct backing_dev_info *bdi = inode_to_bdi(inode);
 	struct address_space *mapping = inode->i_mapping;
 	struct bdi_writeback *old_wb = inode->i_wb;
 	struct bdi_writeback *new_wb = isw->new_wb;
@@ -344,6 +355,12 @@
 	void **slot;
 
 	/*
+	 * If @inode switches cgwb membership while sync_inodes_sb() is
+	 * being issued, sync_inodes_sb() might miss it.  Synchronize.
+	 */
+	down_read(&bdi->wb_switch_rwsem);
+
+	/*
 	 * By the time control reaches here, RCU grace period has passed
 	 * since I_WB_SWITCH assertion and all wb stat update transactions
 	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
@@ -435,6 +452,8 @@
 	spin_unlock(&new_wb->list_lock);
 	spin_unlock(&old_wb->list_lock);
 
+	up_read(&bdi->wb_switch_rwsem);
+
 	if (switched) {
 		wb_wakeup(new_wb);
 		wb_put(old_wb);
@@ -475,9 +494,18 @@
 	if (inode->i_state & I_WB_SWITCH)
 		return;
 
+	/*
+	 * Avoid starting new switches while sync_inodes_sb() is in
+	 * progress.  Otherwise, if the down_write protected issue path
+	 * blocks heavily, we might end up starting a large number of
+	 * switches which will block on the rwsem.
+	 */
+	if (!down_read_trylock(&bdi->wb_switch_rwsem))
+		return;
+
 	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
 	if (!isw)
-		return;
+		goto out_unlock;
 
 	/* find and pin the new wb */
 	rcu_read_lock();
@@ -502,8 +530,6 @@
 
 	isw->inode = inode;
 
-	atomic_inc(&isw_nr_in_flight);
-
 	/*
 	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
 	 * the RCU protected stat update paths to grab the mapping's
@@ -511,12 +537,17 @@
 	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 	 */
 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
-	return;
+
+	atomic_inc(&isw_nr_in_flight);
+
+	goto out_unlock;
 
 out_free:
 	if (isw->new_wb)
 		wb_put(isw->new_wb);
 	kfree(isw);
+out_unlock:
+	up_read(&bdi->wb_switch_rwsem);
 }
 
 /**
@@ -878,7 +909,11 @@
 void cgroup_writeback_umount(void)
 {
 	if (atomic_read(&isw_nr_in_flight)) {
-		synchronize_rcu();
+		/*
+		 * Use rcu_barrier() to wait for all pending callbacks to
+		 * ensure that all in-flight wb switches are in the workqueue.
+		 */
+		rcu_barrier();
 		flush_workqueue(isw_wq);
 	}
 }
@@ -894,6 +929,9 @@
 
 #else	/* CONFIG_CGROUP_WRITEBACK */
 
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+
 static struct bdi_writeback *
 locked_inode_to_wb_and_lock_list(struct inode *inode)
 	__releases(&inode->i_lock)
@@ -2408,8 +2446,11 @@
 		return;
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
+	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
+	bdi_down_write_wb_switch_rwsem(bdi);
 	bdi_split_work_to_wbs(bdi, &work, false);
 	wb_wait_for_completion(bdi, &done);
+	bdi_up_write_wb_switch_rwsem(bdi);
 
 	wait_sb_inodes(sb);
 }
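The fs-writeback change above pairs a write-side hold of wb_switch_rwsem in sync_inodes_sb() with a read-side trylock in the switch-initiation path, so switches simply refuse to start while a sync is issuing writeback. The same shape with a POSIX rwlock (userspace sketch, compile with -lpthread):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t switch_rwsem = PTHREAD_RWLOCK_INITIALIZER;

	/* the sync path: exclude all new switches for its whole duration */
	static void sync_inodes(void)
	{
		pthread_rwlock_wrlock(&switch_rwsem);
		/* ... issue and wait for writeback ... */
		pthread_rwlock_unlock(&switch_rwsem);
	}

	/* the switch path: don't even start if a sync is in flight */
	static int try_start_switch(void)
	{
		if (pthread_rwlock_tryrdlock(&switch_rwsem))
			return -1;	/* sync in progress: skip this switch */
		/* ... queue the switch work ... */
		pthread_rwlock_unlock(&switch_rwsem);
		return 0;
	}

	int main(void)
	{
		printf("switch started? %s\n", try_start_switch() ? "no" : "yes");
		sync_inodes();
		return 0;
	}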
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 3541710..066efed 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1552,7 +1552,7 @@
 {
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_inode *fi = get_fuse_inode(inode);
-	size_t crop = i_size_read(inode);
+	loff_t crop = i_size_read(inode);
 	struct fuse_req *req;
 
 	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
@@ -2998,6 +2998,13 @@
 		}
 	}
 
+	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+	    offset + length > i_size_read(inode)) {
+		err = inode_newsize_ok(inode, offset + length);
+		if (err)
+			return err;
+	}
+
 	if (!(mode & FALLOC_FL_KEEP_SIZE))
 		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 001487b..4acc677 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -746,11 +746,17 @@
 					umode_t mode, dev_t dev)
 {
 	struct inode *inode;
-	struct resv_map *resv_map;
+	struct resv_map *resv_map = NULL;
 
-	resv_map = resv_map_alloc();
-	if (!resv_map)
-		return NULL;
+	/*
+	 * Reserve maps are only needed for inodes that can have associated
+	 * page allocations.
+	 */
+	if (S_ISREG(mode) || S_ISLNK(mode)) {
+		resv_map = resv_map_alloc();
+		if (!resv_map)
+			return NULL;
+	}
 
 	inode = new_inode(sb);
 	if (inode) {
@@ -782,8 +788,10 @@
 			break;
 		}
 		lockdep_annotate_inode_mutex_key(inode);
-	} else
-		kref_put(&resv_map->refs, resv_map_release);
+	} else {
+		if (resv_map)
+			kref_put(&resv_map->refs, resv_map_release);
+	}
 
 	return inode;
 }
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 31f8ca0..10ec276 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -700,9 +700,11 @@
                            the last tag we set up. */
 
 			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
-
-			jbd2_descriptor_block_csum_set(journal, descriptor);
 start_journal_io:
+			if (descriptor)
+				jbd2_descriptor_block_csum_set(journal,
+							descriptor);
+
 			for (i = 0; i < bufs; i++) {
 				struct buffer_head *bh = wbuf[i];
 				/*
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index d10bb2c..3cbcf64 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1339,6 +1339,10 @@
 	journal_superblock_t *sb = journal->j_superblock;
 	int ret;
 
+	/* Buffer got discarded, which means the block device got invalidated */
+	if (!buffer_mapped(bh))
+		return -EIO;
+
 	trace_jbd2_write_superblock(journal, write_flags);
 	if (!(journal->j_flags & JBD2_BARRIER))
 		write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index b320c1b..799f96c 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1211,11 +1211,12 @@
 	struct journal_head *jh;
 	char *committed_data = NULL;
 
-	JBUFFER_TRACE(jh, "entry");
 	if (jbd2_write_access_granted(handle, bh, true))
 		return 0;
 
 	jh = jbd2_journal_add_journal_head(bh);
+	JBUFFER_TRACE(jh, "entry");
+
 	/*
 	 * Do this first --- it can drop the journal lock, so we want to
 	 * make sure that obtaining the committed_data is done
@@ -1326,15 +1327,17 @@
 
 	if (is_handle_aborted(handle))
 		return -EROFS;
-	if (!buffer_jbd(bh)) {
-		ret = -EUCLEAN;
-		goto out;
-	}
+	if (!buffer_jbd(bh))
+		return -EUCLEAN;
+
 	/*
 	 * We don't grab jh reference here since the buffer must be part
 	 * of the running transaction.
 	 */
 	jh = bh2jh(bh);
+	jbd_debug(5, "journal_head %p\n", jh);
+	JBUFFER_TRACE(jh, "entry");
+
 	/*
 	 * This and the following assertions are unreliable since we may see jh
 	 * in inconsistent state unless we grab bh_state lock. But this is
@@ -1368,9 +1371,6 @@
 	}
 
 	journal = transaction->t_journal;
-	jbd_debug(5, "journal_head %p\n", jh);
-	JBUFFER_TRACE(jh, "entry");
-
 	jbd_lock_bh_state(bh);
 
 	if (jh->b_modified == 0) {
@@ -1568,14 +1568,21 @@
 		/* However, if the buffer is still owned by a prior
 		 * (committing) transaction, we can't drop it yet... */
 		JBUFFER_TRACE(jh, "belongs to older transaction");
-		/* ... but we CAN drop it from the new transaction if we
-		 * have also modified it since the original commit. */
+		/* ... but we CAN drop it from the new transaction by
+		 * marking the buffer as freed and setting b_next_transaction
+		 * to the new transaction, so that not only does the commit
+		 * code know it should clear dirty bits when it is done with
+		 * the buffer, but also the buffer can be checkpointed only
+		 * after the new transaction commits. */
 
-		if (jh->b_next_transaction) {
-			J_ASSERT(jh->b_next_transaction == transaction);
+		set_buffer_freed(bh);
+
+		if (!jh->b_next_transaction) {
 			spin_lock(&journal->j_list_lock);
-			jh->b_next_transaction = NULL;
+			jh->b_next_transaction = transaction;
 			spin_unlock(&journal->j_list_lock);
+		} else {
+			J_ASSERT(jh->b_next_transaction == transaction);
 
 			/*
 			 * only drop a reference if this transaction modified
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 06a71db..2f236cc 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1414,11 +1414,6 @@
 
 	jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
 
-	if (f->target) {
-		kfree(f->target);
-		f->target = NULL;
-	}
-
 	fds = f->dents;
 	while(fds) {
 		fd = fds;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 2266405..76aedbc 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -47,7 +47,10 @@
 static void jffs2_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+
+	kfree(f->target);
+	kmem_cache_free(jffs2_inode_cachep, f);
 }
 
 static void jffs2_destroy_inode(struct inode *inode)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index cf4c636..0b9d23b 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -468,7 +468,7 @@
 		rwsem_release(&kn->dep_map, 1, _RET_IP_);
 	}
 
-	kernfs_unmap_bin_file(kn);
+	kernfs_drain_open_files(kn);
 
 	mutex_lock(&kernfs_mutex);
 }
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index d6512cd..27358c8 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -708,7 +708,8 @@
 	if (error)
 		goto err_free;
 
-	((struct seq_file *)file->private_data)->private = of;
+	of->seq_file = file->private_data;
+	of->seq_file->private = of;
 
 	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
 	if (file->f_mode & FMODE_WRITE)
@@ -717,13 +718,22 @@
 	/* make sure we have open node struct */
 	error = kernfs_get_open_node(kn, of);
 	if (error)
-		goto err_close;
+		goto err_seq_release;
+
+	if (ops->open) {
+		/* nobody has access to @of yet, skip @of->mutex */
+		error = ops->open(of);
+		if (error)
+			goto err_put_node;
+	}
 
 	/* open succeeded, put active references */
 	kernfs_put_active(kn);
 	return 0;
 
-err_close:
+err_put_node:
+	kernfs_put_open_node(kn, of);
+err_seq_release:
 	seq_release(inode, file);
 err_free:
 	kfree(of->prealloc_buf);
@@ -733,11 +743,41 @@
 	return error;
 }
 
+/* used from release/drain to ensure that ->release() is called exactly once */
+static void kernfs_release_file(struct kernfs_node *kn,
+				struct kernfs_open_file *of)
+{
+	/*
+	 * @of is guaranteed to have no other file operations in flight and
+	 * we just want to synchronize release and drain paths.
+	 * @kernfs_open_file_mutex is enough.  @of->mutex can't be used
+	 * here because the drain path may be called from contexts which
+	 * would create a circular locking dependency.
+	 */
+	lockdep_assert_held(&kernfs_open_file_mutex);
+
+	if (!of->released) {
+		/*
+		 * A file is never detached without being released and we
+		 * need to be able to release files which are deactivated
+		 * and being drained.  Don't use kernfs_ops().
+		 */
+		kn->attr.ops->release(of);
+		of->released = true;
+	}
+}
+
 static int kernfs_fop_release(struct inode *inode, struct file *filp)
 {
 	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
 	struct kernfs_open_file *of = kernfs_of(filp);
 
+	if (kn->flags & KERNFS_HAS_RELEASE) {
+		mutex_lock(&kernfs_open_file_mutex);
+		kernfs_release_file(kn, of);
+		mutex_unlock(&kernfs_open_file_mutex);
+	}
+
 	kernfs_put_open_node(kn, of);
 	seq_release(inode, filp);
 	kfree(of->prealloc_buf);
@@ -746,12 +786,12 @@
 	return 0;
 }
 
-void kernfs_unmap_bin_file(struct kernfs_node *kn)
+void kernfs_drain_open_files(struct kernfs_node *kn)
 {
 	struct kernfs_open_node *on;
 	struct kernfs_open_file *of;
 
-	if (!(kn->flags & KERNFS_HAS_MMAP))
+	if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
 		return;
 
 	spin_lock_irq(&kernfs_open_node_lock);
@@ -763,10 +803,17 @@
 		return;
 
 	mutex_lock(&kernfs_open_file_mutex);
+
 	list_for_each_entry(of, &on->files, list) {
 		struct inode *inode = file_inode(of->file);
-		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+
+		if (kn->flags & KERNFS_HAS_MMAP)
+			unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+
+		if (kn->flags & KERNFS_HAS_RELEASE)
+			kernfs_release_file(kn, of);
 	}
+
 	mutex_unlock(&kernfs_open_file_mutex);
 
 	kernfs_put_open_node(kn, NULL);
@@ -786,26 +833,35 @@
  * to see if it supports poll (Neither 'poll' nor 'select' return
  * an appropriate error code).  When in doubt, set a suitable timeout value.
  */
+unsigned int kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
+{
+	struct kernfs_node *kn = of->file->f_path.dentry->d_fsdata;
+	struct kernfs_open_node *on = kn->attr.open;
+
+	poll_wait(of->file, &on->poll, wait);
+
+	if (of->event != atomic_read(&on->event))
+		return DEFAULT_POLLMASK|POLLERR|POLLPRI;
+
+	return DEFAULT_POLLMASK;
+}
+
 static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
 {
 	struct kernfs_open_file *of = kernfs_of(filp);
 	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
-	struct kernfs_open_node *on = kn->attr.open;
+	unsigned int ret;
 
 	if (!kernfs_get_active(kn))
-		goto trigger;
+		return DEFAULT_POLLMASK|POLLERR|POLLPRI;
 
-	poll_wait(filp, &on->poll, wait);
+	if (kn->attr.ops->poll)
+		ret = kn->attr.ops->poll(of, wait);
+	else
+		ret = kernfs_generic_poll(of, wait);
 
 	kernfs_put_active(kn);
-
-	if (of->event != atomic_read(&on->event))
-		goto trigger;
-
-	return DEFAULT_POLLMASK;
-
- trigger:
-	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
+	return ret;
 }
 
 static void kernfs_notify_workfn(struct work_struct *work)
@@ -965,6 +1021,8 @@
 		kn->flags |= KERNFS_HAS_SEQ_SHOW;
 	if (ops->mmap)
 		kn->flags |= KERNFS_HAS_MMAP;
+	if (ops->release)
+		kn->flags |= KERNFS_HAS_RELEASE;
 
 	rc = kernfs_add_one(kn);
 	if (rc) {
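kernfs_release_file() above serializes on kernfs_open_file_mutex and latches of->released so that ->release() runs exactly once, whether the file is closed normally or torn down through kernfs_drain_open_files(). A userspace sketch of the call-once latch (types hypothetical, compile with -lpthread):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t open_file_mutex = PTHREAD_MUTEX_INITIALIZER;

	struct open_file {
		bool released;
		void (*release)(struct open_file *of);
	};

	/* callable from both the close path and the drain path */
	static void release_file(struct open_file *of)
	{
		pthread_mutex_lock(&open_file_mutex);
		if (!of->released) {	/* a second caller sees the flag and skips */
			of->release(of);
			of->released = true;
		}
		pthread_mutex_unlock(&open_file_mutex);
	}

	static void my_release(struct open_file *of)
	{
		printf("released %p\n", (void *)of);
	}

	int main(void)
	{
		struct open_file of = { .released = false, .release = my_release };

		release_file(&of);	/* drain path */
		release_file(&of);	/* later close: prints nothing extra */
		return 0;
	}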
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index bfd551b..3100987 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -104,7 +104,7 @@
  */
 extern const struct file_operations kernfs_file_fops;
 
-void kernfs_unmap_bin_file(struct kernfs_node *kn);
+void kernfs_drain_open_files(struct kernfs_node *kn);
 
 /*
  * symlink.c
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ebecfb8..28d8a57 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -440,7 +440,7 @@
 	case XPRT_TRANSPORT_RDMA:
 		if (retrans == NFS_UNSPEC_RETRANS)
 			to->to_retries = NFS_DEF_TCP_RETRANS;
-		if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
+		if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
 			to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
 		if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
 			to->to_initval = NFS_MAX_TCP_TIMEOUT;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index eb55ab6..6d0d94f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2748,7 +2748,8 @@
 			nfs4_schedule_stateid_recovery(server, state);
 	}
 out:
-	nfs4_sequence_free_slot(&opendata->o_res.seq_res);
+	if (!opendata->cancelled)
+		nfs4_sequence_free_slot(&opendata->o_res.seq_res);
 	return ret;
 }
 
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 857af95..6f474b0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -143,6 +143,10 @@
 		/* Sustain the lease, even if it's empty.  If the clientid4
 		 * goes stale it's of no use for trunking discovery. */
 		nfs4_schedule_state_renewal(*result);
+
+		/* If the client state need to recover, do it. */
+		if (clp->cl_state)
+			nfs4_schedule_state_manager(clp);
 	}
 out:
 	return status;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 892c885..fad4d51 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -975,6 +975,17 @@
 	}
 }
 
+static void
+nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
+		struct nfs_page *req)
+{
+	LIST_HEAD(head);
+
+	nfs_list_remove_request(req);
+	nfs_list_add_request(req, &head);
+	desc->pg_completion_ops->error_cleanup(&head);
+}
+
 /**
  * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
  * @desc: destination io descriptor
@@ -1012,10 +1023,8 @@
 			nfs_page_group_unlock(req);
 			desc->pg_moreio = 1;
 			nfs_pageio_doio(desc);
-			if (desc->pg_error < 0)
-				return 0;
-			if (mirror->pg_recoalesce)
-				return 0;
+			if (desc->pg_error < 0 || mirror->pg_recoalesce)
+				goto out_cleanup_subreq;
 			/* retry add_request for this subreq */
 			nfs_page_group_lock(req, false);
 			continue;
@@ -1048,6 +1057,10 @@
 	desc->pg_error = PTR_ERR(subreq);
 	nfs_page_group_unlock(req);
 	return 0;
+out_cleanup_subreq:
+	if (req != subreq)
+		nfs_pageio_cleanup_request(desc, subreq);
+	return 0;
 }
 
 static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
@@ -1066,7 +1079,6 @@
 			struct nfs_page *req;
 
 			req = list_first_entry(&head, struct nfs_page, wb_list);
-			nfs_list_remove_request(req);
 			if (__nfs_pageio_add_request(desc, req))
 				continue;
 			if (desc->pg_error < 0) {
@@ -1141,11 +1153,14 @@
 		if (nfs_pgio_has_mirroring(desc))
 			desc->pg_mirror_idx = midx;
 		if (!nfs_pageio_add_request_mirror(desc, dupreq))
-			goto out_failed;
+			goto out_cleanup_subreq;
 	}
 
 	return 1;
 
+out_cleanup_subreq:
+	if (req != dupreq)
+		nfs_pageio_cleanup_request(desc, dupreq);
 out_failed:
 	/*
 	 * We might have failed before sending any reqs over wire.
@@ -1185,7 +1200,7 @@
 		desc->pg_mirror_idx = mirror_idx;
 	for (;;) {
 		nfs_pageio_doio(desc);
-		if (!mirror->pg_recoalesce)
+		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
 			break;
 		if (!nfs_do_recoalesce(desc))
 			break;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 659ad12..42c3158 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2047,7 +2047,8 @@
 		memcpy(sap, &data->addr, sizeof(data->addr));
 		args->nfs_server.addrlen = sizeof(data->addr);
 		args->nfs_server.port = ntohs(data->addr.sin_port);
-		if (!nfs_verify_server_address(sap))
+		if (sap->sa_family != AF_INET ||
+		    !nfs_verify_server_address(sap))
 			goto out_no_address;
 
 		if (!(data->flags & NFS_MOUNT_TCP))
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index d818e4f..00b472f 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -431,8 +431,19 @@
 					&resp->common, nfs3svc_encode_entry);
 	memcpy(resp->verf, argp->verf, 8);
 	resp->count = resp->buffer - argp->buffer;
-	if (resp->offset)
-		xdr_encode_hyper(resp->offset, argp->cookie);
+	if (resp->offset) {
+		loff_t offset = argp->cookie;
+
+		if (unlikely(resp->offset1)) {
+			/* we ended up with offset on a page boundary */
+			*resp->offset = htonl(offset >> 32);
+			*resp->offset1 = htonl(offset & 0xffffffff);
+			resp->offset1 = NULL;
+		} else {
+			xdr_encode_hyper(resp->offset, offset);
+		}
+		resp->offset = NULL;
+	}
 
 	RETURN_STATUS(nfserr);
 }
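The readdir fix above handles the case where the 8-byte cookie slot straddles a page boundary: the two 32-bit halves are written separately with htonl() instead of one xdr_encode_hyper(). A small sketch of that split encode (the two slot pointers are stand-ins for the two pages):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	/* write one 64-bit XDR "hyper" whose halves may sit in different buffers */
	static void encode_split_hyper(uint32_t *hi_slot, uint32_t *lo_slot,
				       uint64_t cookie)
	{
		*hi_slot = htonl((uint32_t)(cookie >> 32));
		*lo_slot = htonl((uint32_t)(cookie & 0xffffffff));
	}

	int main(void)
	{
		uint32_t page_a_tail, page_b_head;	/* stand-ins for two pages */
		uint64_t cookie = 0x0102030405060708ULL;

		encode_split_hyper(&page_a_tail, &page_b_head, cookie);
		printf("hi=%08x lo=%08x\n", ntohl(page_a_tail), ntohl(page_b_head));
		return 0;
	}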
@@ -500,6 +511,7 @@
 		} else {
 			xdr_encode_hyper(resp->offset, offset);
 		}
+		resp->offset = NULL;
 	}
 
 	RETURN_STATUS(nfserr);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 4523346..7e50248 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -899,6 +899,7 @@
 		} else {
 			xdr_encode_hyper(cd->offset, offset64);
 		}
+		cd->offset = NULL;
 	}
 
 	/*
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 3069cd4..8d84228 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -934,8 +934,9 @@
 	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
 	if (minorversion) {
-		if (!nfsd41_cb_get_slot(clp, task))
+		if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
 			return;
+		cb->cb_holds_slot = true;
 	}
 	rpc_call_start(task);
 }
@@ -962,6 +963,9 @@
 		return true;
 	}
 
+	if (!cb->cb_holds_slot)
+		goto need_restart;
+
 	switch (cb->cb_seq_status) {
 	case 0:
 		/*
@@ -999,6 +1003,7 @@
 			cb->cb_seq_status);
 	}
 
+	cb->cb_holds_slot = false;
 	clear_bit(0, &clp->cl_cb_slot_busy);
 	rpc_wake_up_next(&clp->cl_cb_waitq);
 	dprintk("%s: freed slot, new seqid=%d\n", __func__,
@@ -1206,6 +1211,7 @@
 	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
 	cb->cb_need_restart = false;
+	cb->cb_holds_slot = false;
 }
 
 void nfsd4_run_cb(struct nfsd4_callback *cb)
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 797a155..f704f90 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1103,7 +1103,7 @@
 		case 'Y':
 		case 'y':
 		case '1':
-			if (nn->nfsd_serv)
+			if (!nn->nfsd_serv)
 				return -EBUSY;
 			nfsd4_end_grace(nn);
 			break;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 86aa92d..133d8bf 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -69,6 +69,7 @@
 	int cb_seq_status;
 	int cb_status;
 	bool cb_need_restart;
+	bool cb_holds_slot;
 };
 
 struct nfsd4_callback_ops {
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index c204ac9b..81a0d5d 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -621,13 +621,15 @@
 	struct o2nm_node *node = to_o2nm_node(item);
 	struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
 
-	o2net_disconnect_node(node);
+	if (cluster->cl_nodes[node->nd_num] == node) {
+		o2net_disconnect_node(node);
 
-	if (cluster->cl_has_local &&
-	    (cluster->cl_local_node == node->nd_num)) {
-		cluster->cl_has_local = 0;
-		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
-		o2net_stop_listening(node);
+		if (cluster->cl_has_local &&
+		    (cluster->cl_local_node == node->nd_num)) {
+			cluster->cl_has_local = 0;
+			cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
+			o2net_stop_listening(node);
+		}
 	}
 
 	/* XXX call into net to stop this node from trading messages */
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 3494e22..bed15de 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -148,16 +148,24 @@
 	u64 blkno;
 	struct dentry *parent;
 	struct inode *dir = d_inode(child);
+	int set;
 
 	trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
 			       (unsigned long long)OCFS2_I(dir)->ip_blkno);
 
+	status = ocfs2_nfs_sync_lock(OCFS2_SB(dir->i_sb), 1);
+	if (status < 0) {
+		mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
+		parent = ERR_PTR(status);
+		goto bail;
+	}
+
 	status = ocfs2_inode_lock(dir, NULL, 0);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
 		parent = ERR_PTR(status);
-		goto bail;
+		goto unlock_nfs_sync;
 	}
 
 	status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
@@ -166,11 +174,31 @@
 		goto bail_unlock;
 	}
 
+	status = ocfs2_test_inode_bit(OCFS2_SB(dir->i_sb), blkno, &set);
+	if (status < 0) {
+		if (status == -EINVAL)
+			status = -ESTALE;
+		else
+			mlog(ML_ERROR, "test inode bit failed %d\n", status);
+		parent = ERR_PTR(status);
+		goto bail_unlock;
+	}
+
+	trace_ocfs2_get_dentry_test_bit(status, set);
+	if (!set) {
+		status = -ESTALE;
+		parent = ERR_PTR(status);
+		goto bail_unlock;
+	}
+
 	parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
 
 bail_unlock:
 	ocfs2_inode_unlock(dir, 0);
 
+unlock_nfs_sync:
+	ocfs2_nfs_sync_unlock(OCFS2_SB(dir->i_sb), 1);
+
 bail:
 	trace_ocfs2_get_parent_end(parent);
 
diff --git a/fs/open.c b/fs/open.c
index 73b7d19..e8818ea 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -730,6 +730,12 @@
 		return 0;
 	}
 
+	/* Any file opened for execve()/uselib() has to be a regular file. */
+	if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
+		error = -EACCES;
+		goto cleanup_file;
+	}
+
 	if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
 		error = get_write_access(inode);
 		if (unlikely(error))
diff --git a/fs/pipe.c b/fs/pipe.c
index 3434553..388e09a 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -238,6 +238,14 @@
 	.get = generic_pipe_buf_get,
 };
 
+static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
+	.can_merge = 0,
+	.confirm = generic_pipe_buf_confirm,
+	.release = anon_pipe_buf_release,
+	.steal = anon_pipe_buf_steal,
+	.get = generic_pipe_buf_get,
+};
+
 static const struct pipe_buf_operations packet_pipe_buf_ops = {
 	.can_merge = 0,
 	.confirm = generic_pipe_buf_confirm,
@@ -246,6 +254,12 @@
 	.get = generic_pipe_buf_get,
 };
 
+void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
+{
+	if (buf->ops == &anon_pipe_buf_ops)
+		buf->ops = &anon_pipe_buf_nomerge_ops;
+}
+
 static ssize_t
 pipe_read(struct kiocb *iocb, struct iov_iter *to)
 {
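pipe_buf_mark_unmergeable() above changes a buffer's behavior by repointing it at a twin ops table whose only difference is can_merge = 0, rather than adding per-buffer state. A compact sketch of that ops-swap idiom (struct layout simplified):

	#include <stdio.h>

	struct buf_ops { int can_merge; };
	struct buf { const struct buf_ops *ops; };

	static const struct buf_ops anon_ops         = { .can_merge = 1 };
	static const struct buf_ops anon_nomerge_ops = { .can_merge = 0 };

	/* flip behavior by pointing at a twin ops table, not by mutating state */
	static void mark_unmergeable(struct buf *b)
	{
		if (b->ops == &anon_ops)
			b->ops = &anon_nomerge_ops;
	}

	int main(void)
	{
		struct buf b = { .ops = &anon_ops };

		mark_unmergeable(&b);
		printf("can_merge=%d\n", b.ops->can_merge);	/* 0 */
		return 0;
	}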
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index aec66e6..e4b986e 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -3,13 +3,11 @@
 #include <linux/pid_namespace.h>
 #include <linux/proc_fs.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/seq_file.h>
 #include <linux/seqlock.h>
 #include <linux/time.h>
 
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-
 static int loadavg_proc_show(struct seq_file *m, void *v)
 {
 	unsigned long avnrun[3];
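The LOAD_INT()/LOAD_FRAC() macros removed above (now taken from <linux/sched/loadavg.h>) decode the kernel's fixed-point load averages, which carry FSHIFT = 11 fraction bits. A worked demo of the decoding:

	#include <stdio.h>

	#define FSHIFT   11			/* bits of fractional precision */
	#define FIXED_1  (1 << FSHIFT)		/* 1.0 in fixed point */
	#define LOAD_INT(x)  ((x) >> FSHIFT)
	#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

	int main(void)
	{
		unsigned long avnrun = 2560;	/* 2560/2048 = 1.25 */

		printf("load: %lu.%02lu\n", LOAD_INT(avnrun), LOAD_FRAC(avnrun));
		return 0;
	}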
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 1999e85..5b32c05 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1604,8 +1604,11 @@
 	if (--header->nreg)
 		return;
 
-	put_links(header);
-	start_unregistering(header);
+	if (parent) {
+		put_links(header);
+		start_unregistering(header);
+	}
+
 	if (!--header->count)
 		kfree_rcu(header, rcu);
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e61acca..32ddba9 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -517,7 +517,7 @@
 };
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
-		bool compound, bool young, bool dirty)
+		bool compound, bool young, bool dirty, bool locked)
 {
 	int i, nr = compound ? 1 << compound_order(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
@@ -541,24 +541,31 @@
 		else
 			mss->private_clean += size;
 		mss->pss += (u64)size << PSS_SHIFT;
+		if (locked)
+			mss->pss_locked += (u64)size << PSS_SHIFT;
 		return;
 	}
 
 	for (i = 0; i < nr; i++, page++) {
 		int mapcount = page_mapcount(page);
+		unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
 
 		if (mapcount >= 2) {
 			if (dirty || PageDirty(page))
 				mss->shared_dirty += PAGE_SIZE;
 			else
 				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+			mss->pss += pss / mapcount;
+			if (locked)
+				mss->pss_locked += pss / mapcount;
 		} else {
 			if (dirty || PageDirty(page))
 				mss->private_dirty += PAGE_SIZE;
 			else
 				mss->private_clean += PAGE_SIZE;
-			mss->pss += PAGE_SIZE << PSS_SHIFT;
+			mss->pss += pss;
+			if (locked)
+				mss->pss_locked += pss;
 		}
 	}
 }
@@ -581,6 +588,7 @@
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
 
 	if (pte_present(*pte)) {
@@ -621,7 +629,7 @@
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
+	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -630,6 +638,7 @@
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page;
 
 	/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -644,7 +653,7 @@
 		/* pass */;
 	else
 		VM_BUG_ON_PAGE(1, page);
-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
+	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -840,11 +849,8 @@
 		}
 	}
 #endif
-
 	/* mmap_sem is held in m_start */
 	walk_page_vma(vma, &smaps_walk);
-	if (vma->vm_flags & VM_LOCKED)
-		mss->pss_locked += mss->pss;
 
 	if (!rollup_mode) {
 		show_map_vma(m, vma, is_pid);
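With the change above, the locked share is accumulated page by page at the same proportional granularity as pss itself: each page adds (PAGE_SIZE << PSS_SHIFT) / mapcount, instead of copying the whole VMA's pss afterwards. A worked example of the arithmetic:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PSS_SHIFT 12	/* pss is kept with 12 bits of fixed-point precision */

	int main(void)
	{
		unsigned long long pss = 0, pss_locked = 0;
		int mapcounts[] = { 1, 2, 4 };	/* three pages, shared 1x, 2x, 4x */
		int locked = 1;			/* pretend the VMA is VM_LOCKED */

		for (int i = 0; i < 3; i++) {
			unsigned long long share =
				(PAGE_SIZE << PSS_SHIFT) / mapcounts[i];

			pss += share;
			if (locked)
				pss_locked += share;	/* same proportional share */
		}
		/* >> PSS_SHIFT converts back to bytes: 4096 + 2048 + 1024 = 7168 */
		printf("pss=%llu bytes, locked=%llu bytes\n",
		       pss >> PSS_SHIFT, pss_locked >> PSS_SHIFT);
		return 0;
	}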
diff --git a/fs/read_write.c b/fs/read_write.c
index c06d7f9..ce77e76 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1196,6 +1196,9 @@
 		const struct compat_iovec __user *,vec,
 		unsigned long, vlen, loff_t, pos, int, flags)
 {
+	if (pos == -1)
+		return do_compat_readv(fd, vec, vlen, flags);
+
 	return do_compat_preadv64(fd, vec, vlen, pos, flags);
 }
 #endif
@@ -1302,6 +1305,9 @@
 		const struct compat_iovec __user *,vec,
 		unsigned long, vlen, loff_t, pos, int, flags)
 {
+	if (pos == -1)
+		return do_compat_writev(fd, vec, vlen, flags);
+
 	return do_compat_pwritev64(fd, vec, vlen, pos, flags);
 }
 #endif
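The compat wrappers above treat pos == -1 as "use and advance the file position", dispatching to the plain readv/writev path instead of the positional one. A userspace sketch of the same dispatch using POSIX readv()/preadv() (the helper name is made up):

	#define _DEFAULT_SOURCE
	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	/* dispatch like the compat wrapper: -1 means "use the file position" */
	static ssize_t readv2_like(int fd, const struct iovec *vec, int vlen,
				   off_t pos)
	{
		if (pos == -1)
			return readv(fd, vec, vlen);	/* advances the file offset */
		return preadv(fd, vec, vlen, pos);	/* leaves the offset alone */
	}

	int main(void)
	{
		char buf[16];
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

		printf("read %zd bytes\n", readv2_like(0, &iov, 1, -1));
		return 0;
	}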
diff --git a/fs/splice.c b/fs/splice.c
index 8dd79ec..01983be 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1594,6 +1594,8 @@
 			 */
 			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
 
+			pipe_buf_mark_unmergeable(obuf);
+
 			obuf->len = len;
 			opipe->nrbufs++;
 			ibuf->offset += obuf->len;
@@ -1668,6 +1670,8 @@
 		 */
 		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
 
+		pipe_buf_mark_unmergeable(obuf);
+
 		if (obuf->len > len)
 			obuf->len = len;
 
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 42b8c57..c6ce750 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -260,6 +260,9 @@
 			epos.block = eloc;
 			epos.bh = udf_tread(sb,
 					udf_get_lb_pblock(sb, &eloc, 0));
+			/* Error reading indirect block? */
+			if (!epos.bh)
+				return;
 			if (elen)
 				indirect_ext_len =
 					(elen + sb->s_blocksize - 1) >>
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 398019f..9c4fb1f 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -228,7 +228,7 @@
 	case UFS_UID_44BSD:
 		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
 	case UFS_UID_EFT:
-		if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
+		if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
 			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
 		/* Fall through */
 	default:
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 12c2882..eef0696 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -122,7 +122,7 @@
 
 /* Maximum object reference count (detects object deletion issues) */
 
-#define ACPI_MAX_REFERENCE_COUNT        0x1000
+#define ACPI_MAX_REFERENCE_COUNT        0x4000
 
 /* Default page size for use in mapping memory for operation regions */
 
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 73fd8b7..af43ed4 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -150,19 +150,29 @@
 extern int sysctl_aarp_resolve_time;
 
 #ifdef CONFIG_SYSCTL
-extern void atalk_register_sysctl(void);
+extern int atalk_register_sysctl(void);
 extern void atalk_unregister_sysctl(void);
 #else
-#define atalk_register_sysctl()		do { } while(0)
-#define atalk_unregister_sysctl()	do { } while(0)
+static inline int atalk_register_sysctl(void)
+{
+	return 0;
+}
+static inline void atalk_unregister_sysctl(void)
+{
+}
 #endif
 
 #ifdef CONFIG_PROC_FS
 extern int atalk_proc_init(void);
 extern void atalk_proc_exit(void);
 #else
-#define atalk_proc_init()	({ 0; })
-#define atalk_proc_exit()	do { } while(0)
+static inline int atalk_proc_init(void)
+{
+	return 0;
+}
+static inline void atalk_proc_exit(void)
+{
+}
 #endif /* CONFIG_PROC_FS */
 
 #endif /* __LINUX_ATALK_H__ */
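The atalk.h hunk above replaces do { } while (0) and ({ 0; }) macro stubs with static inline functions, so the !CONFIG variants are type-checked like the real ones and usable in expressions. A small demo of the difference:

	#include <stdio.h>

	/* old style: a statement macro can't be used where a value is needed */
	#define old_register()	do { } while (0)

	/* new style: a real function with a real return type */
	static inline int new_register(void)
	{
		return 0;
	}

	int main(void)
	{
		/* int err = old_register();  would not compile: not an expression */
		int err = new_register();	/* type-checked, usable as a value */

		printf("err=%d\n", err);
		return 0;
	}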
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
old mode 100644
new mode 100755
index e69fd34..d7336e5
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -162,6 +162,8 @@
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
 	struct rb_root cgwb_congested_tree; /* their congested states */
+	atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
+	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
 #else
 	struct bdi_writeback_congested *wb_congested;
 #endif
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a83c822..d4b167f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -1,28 +1,9 @@
 #ifndef _LINUX_BITOPS_H
 #define _LINUX_BITOPS_H
 #include <asm/types.h>
+#include <linux/bits.h>
 
-#ifdef	__KERNEL__
-#define BIT(nr)			(1UL << (nr))
-#define BIT_ULL(nr)		(1ULL << (nr))
-#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE		8
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
-
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
-	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
-	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
 
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index fb790b8..333e42c 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -31,32 +31,32 @@
 
 #define __constant_bitrev32(x)	\
 ({					\
-	u32 __x = x;			\
-	__x = (__x >> 16) | (__x << 16);	\
-	__x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8);	\
-	__x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);	\
-	__x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);	\
-	__x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);	\
-	__x;								\
+	u32 ___x = x;			\
+	___x = (___x >> 16) | (___x << 16);	\
+	___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8);	\
+	___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);	\
+	___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);	\
+	___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);	\
+	___x;								\
 })
 
 #define __constant_bitrev16(x)	\
 ({					\
-	u16 __x = x;			\
-	__x = (__x >> 8) | (__x << 8);	\
-	__x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4);	\
-	__x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2);	\
-	__x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1);	\
-	__x;								\
+	u16 ___x = x;			\
+	___x = (___x >> 8) | (___x << 8);	\
+	___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4);	\
+	___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2);	\
+	___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1);	\
+	___x;								\
 })
 
 #define __constant_bitrev8(x)	\
 ({					\
-	u8 __x = x;			\
-	__x = (__x >> 4) | (__x << 4);	\
-	__x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2);	\
-	__x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1);	\
-	__x;								\
+	u8 ___x = x;			\
+	___x = (___x >> 4) | (___x << 4);	\
+	___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2);	\
+	___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1);	\
+	___x;								\
 })
 
 #define bitrev32(x) \
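The extra underscore in the bitrev temporaries above matters when these macros are combined with other macros that use __x for their own temporary: the expansion can then read a variable within its own initialization. A minimal demo of the hazard (GNU C statement expressions, as in the kernel; build with gcc):

	#include <stdio.h>

	/* temp named __x: breaks when the argument itself mentions __x */
	#define DOUBLE_BAD(v)	({ int __x = (v); __x * 2; })
	/* extra underscore, as in the patch: no collision with __x arguments */
	#define DOUBLE_GOOD(v)	({ int ___x = (v); ___x * 2; })

	int main(void)
	{
		int __x = 21;	/* e.g. a temporary introduced by another macro */

		/* expands to ({ int __x = (__x); ... }): self-init, garbage */
		printf("bad:  %d\n", DOUBLE_BAD(__x));
		printf("good: %d\n", DOUBLE_GOOD(__x));	/* 42 */
		return 0;
	}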
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 0000000..2b7b532
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr)			(1UL << (nr))
+#define BIT_ULL(nr)		(1ULL << (nr))
+#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE		8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+	(((~0ULL) - (1ULL << (l)) + 1) & \
+	 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif	/* __LINUX_BITS_H */
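The new header keeps the documented semantics: GENMASK_ULL(h, l) sets bits h..l inclusive. A quick userspace check of the expansion (BITS_PER_LONG_LONG is hardcoded here just for the demo):

	#include <stdio.h>

	#define BITS_PER_LONG_LONG 64

	/* same shape as the new header: bits h down to l, inclusive */
	#define GENMASK_ULL(h, l) \
		(((~0ULL) - (1ULL << (l)) + 1) & \
		 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

	int main(void)
	{
		/* matches the header comment: bits 39..21 set */
		printf("GENMASK_ULL(39, 21) = 0x%016llx\n", GENMASK_ULL(39, 21));
		printf("GENMASK_ULL(7, 0)   = 0x%016llx\n", GENMASK_ULL(7, 0));
		return 0;
	}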
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index a8a5748..fe75751 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -276,6 +276,8 @@
 extern int __ceph_open_session(struct ceph_client *client,
 			       unsigned long started);
 extern int ceph_open_session(struct ceph_client *client);
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+				unsigned long timeout);
 
 /* pagevec.c */
 extern void ceph_release_page_vector(struct page **pages, int num_pages);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 34d3f27..ce0f32d 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -17,6 +17,7 @@
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/bpf-cgroup.h>
+#include <linux/psi_types.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -28,6 +29,7 @@
 struct kernfs_ops;
 struct kernfs_open_file;
 struct seq_file;
+struct poll_table_struct;
 
 #define MAX_CGROUP_TYPE_NAMELEN 32
 #define MAX_CGROUP_ROOT_NAMELEN 64
@@ -302,6 +304,9 @@
 	/* used to schedule release agent */
 	struct work_struct release_agent_work;
 
+	/* used to track pressure stalls */
+	struct psi_group psi;
+
 	/* used to store eBPF programs */
 	struct cgroup_bpf bpf;
 
@@ -389,6 +394,9 @@
 	struct list_head node;		/* anchored at ss->cfts */
 	struct kernfs_ops *kf_ops;
 
+	int (*open)(struct kernfs_open_file *of);
+	void (*release)(struct kernfs_open_file *of);
+
 	/*
 	 * read_u64() is a shortcut for the common case of returning a
 	 * single integer. Use it in place of read()
@@ -429,6 +437,9 @@
 	ssize_t (*write)(struct kernfs_open_file *of,
 			 char *buf, size_t nbytes, loff_t off);
 
+	unsigned int (*poll)(struct kernfs_open_file *of,
+			 struct poll_table_struct *pt);
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lock_class_key	lockdep_key;
 #endif
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
old mode 100644
new mode 100755
index 3b242a3..5a232e8
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -388,6 +388,16 @@
 		percpu_ref_put_many(&css->refcnt, n);
 }
 
+static inline void cgroup_get(struct cgroup *cgrp)
+{
+	css_get(&cgrp->self);
+}
+
+static inline bool cgroup_tryget(struct cgroup *cgrp)
+{
+	return css_tryget(&cgrp->self);
+}
+
 static inline void cgroup_put(struct cgroup *cgrp)
 {
 	css_put(&cgrp->self);
@@ -500,6 +510,20 @@
 	return task_css(task, subsys_id)->cgroup;
 }
 
+static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
+{
+	return task_css_set(task)->dfl_cgrp;
+}
+
+static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
+{
+	struct cgroup_subsys_state *parent_css = cgrp->self.parent;
+
+	if (parent_css)
+		return container_of(parent_css, struct cgroup, self);
+	return NULL;
+}
+
 /**
  * cgroup_is_descendant - test ancestry
  * @cgrp: the cgroup to be tested
@@ -599,6 +623,11 @@
  */
 int subsys_cgroup_allow_attach(struct cgroup_taskset *tset);
 
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+	return &cgrp->psi;
+}
+
 static inline void cgroup_init_kthreadd(void)
 {
 	/*
@@ -641,6 +670,16 @@
 static inline void cgroup_init_kthreadd(void) {}
 static inline void cgroup_kthread_ready(void) {}
 
+static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
+{
+	return NULL;
+}
+
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+	return NULL;
+}
+
 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 					       struct cgroup *ancestor)
 {
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index fc3192b..fcdc27c 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -54,6 +54,8 @@
 					  struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_l1tf(struct device *dev,
 			     struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mds(struct device *dev,
+			    struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -278,6 +280,30 @@
 static inline void cpu_smt_check_topology(void) { }
 #endif
 
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+	CPU_MITIGATIONS_OFF,
+	CPU_MITIGATIONS_AUTO,
+	CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+extern enum cpu_mitigations cpu_mitigations;
+
+/* mitigations=off */
+static inline bool cpu_mitigations_off(void)
+{
+	return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+
+/* mitigations=auto,nosmt */
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+}
+
 #define IDLE_START 1
 #define IDLE_END 2
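cpu_mitigations above is set from a "mitigations=" command-line option; the helpers just compare against the enum. A hedged userspace sketch of the option parsing (the kernel's actual parser lives elsewhere and is not part of this hunk):

	#include <stdio.h>
	#include <string.h>

	enum cpu_mitigations {
		CPU_MITIGATIONS_OFF,
		CPU_MITIGATIONS_AUTO,
		CPU_MITIGATIONS_AUTO_NOSMT,
	};

	static enum cpu_mitigations cpu_mitigations = CPU_MITIGATIONS_AUTO;

	/* hypothetical parser for a "mitigations=" style option value */
	static void parse_mitigations(const char *arg)
	{
		if (!strcmp(arg, "off"))
			cpu_mitigations = CPU_MITIGATIONS_OFF;
		else if (!strcmp(arg, "auto,nosmt"))
			cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
		else
			cpu_mitigations = CPU_MITIGATIONS_AUTO;
	}

	static int cpu_mitigations_off(void)
	{
		return cpu_mitigations == CPU_MITIGATIONS_OFF;
	}

	int main(void)
	{
		parse_mitigations("off");
		printf("mitigations off? %d\n", cpu_mitigations_off());
		return 0;
	}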
 
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 6cee17c..84c2f68 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -29,7 +29,48 @@
 #define DELAYACCT_PF_BLKIO	0x00000002	/* I am waiting on IO */
 
 #ifdef CONFIG_TASK_DELAY_ACCT
+struct task_delay_info {
+	spinlock_t	lock;
+	unsigned int	flags;	/* Private per-task flags */
 
+	/* For each stat XXX, add the following, aligned appropriately:
+	 *
+	 * struct timespec XXX_start, XXX_end;
+	 * u64 XXX_delay;
+	 * u32 XXX_count;
+	 *
+	 * Atomicity of updates to XXX_delay, XXX_count protected by
+	 * single lock above (split into XXX_lock if contention is an issue).
+	 */
+
+	/*
+	 * XXX_count is incremented on every XXX operation, the delay
+	 * associated with the operation is added to XXX_delay.
+	 * XXX_delay contains the accumulated delay time in nanoseconds.
+	 */
+	u64 blkio_start;	/* Shared by blkio, swapin */
+	u64 blkio_delay;	/* wait for sync block io completion */
+	u64 swapin_delay;	/* wait for swapin block io completion */
+	u32 blkio_count;	/* number of sync block I/O operations */
+	u32 swapin_count;	/* number of swapin block I/O operations */
+
+	u64 freepages_start;
+	u64 freepages_delay;	/* wait for memory reclaim */
+
+	u64 thrashing_start;
+	u64 thrashing_delay;	/* wait for thrashing page */
+
+	u32 freepages_count;	/* total count of memory reclaim */
+	u32 thrashing_count;	/* total count of thrash waits */
+};
+#endif
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_TASK_DELAY_ACCT
 extern int delayacct_on;	/* Delay accounting turned on/off */
 extern struct kmem_cache *delayacct_cache;
 extern void delayacct_init(void);
@@ -41,6 +82,8 @@
 extern __u64 __delayacct_blkio_ticks(struct task_struct *);
 extern void __delayacct_freepages_start(void);
 extern void __delayacct_freepages_end(void);
+extern void __delayacct_thrashing_start(void);
+extern void __delayacct_thrashing_end(void);
 
 static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
 {
@@ -121,6 +164,18 @@
 		__delayacct_freepages_end();
 }
 
+static inline void delayacct_thrashing_start(void)
+{
+	if (current->delays)
+		__delayacct_thrashing_start();
+}
+
+static inline void delayacct_thrashing_end(void)
+{
+	if (current->delays)
+		__delayacct_thrashing_end();
+}
+
 #else
 static inline void delayacct_set_flag(int flag)
 {}
@@ -147,6 +202,10 @@
 {}
 static inline void delayacct_freepages_end(void)
 {}
+static inline void delayacct_thrashing_start(void)
+{}
+static inline void delayacct_thrashing_end(void)
+{}
 
 #endif /* CONFIG_TASK_DELAY_ACCT */
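
As an illustration of how the new thrashing hooks are intended to be used, a minimal sketch (the call site is an assumption; the real hooks live in the page-wait path):

    delayacct_thrashing_start();
    wait_on_page_bit(page, PG_locked);      /* task stalls on a thrashing page */
    delayacct_thrashing_end();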
 
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 20e26d9..2b37e89 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -640,7 +640,7 @@
  */
 #define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
 
-static inline sector_t to_sector(unsigned long n)
+static inline sector_t to_sector(unsigned long long n)
 {
 	return (n >> SECTOR_SHIFT);
 }
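
The widened parameter matters on 32-bit builds, where unsigned long is only 32 bits; a sketch of the failure the change prevents:

    unsigned long long bytes = 5ULL << 30;  /* a 5 GiB byte count */
    sector_t s = to_sector(bytes);          /* 10485760 sectors (SECTOR_SHIFT
                                               is 9); the old unsigned long
                                               parameter would have truncated
                                               bytes before the shift */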
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 2877ccb..02c4f16 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1433,7 +1433,12 @@
 			   struct screen_info *si, efi_guid_t *proto,
 			   unsigned long size);
 
-bool efi_runtime_disabled(void);
+#ifdef CONFIG_EFI
+extern bool efi_runtime_disabled(void);
+#else
+static inline bool efi_runtime_disabled(void) { return true; }
+#endif
+
 extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
 
 /*
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index d771104..6555990 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -116,6 +116,7 @@
 /*
  * For checkpoint
  */
+#define CP_DISABLED_QUICK_FLAG		0x00002000
 #define CP_DISABLED_FLAG		0x00001000
 #define CP_QUOTA_NEED_FSCK_FLAG		0x00000800
 #define CP_LARGE_NAT_BITMAP_FLAG	0x00000400
@@ -163,6 +164,10 @@
 	unsigned char sit_nat_version_bitmap[1];
 } __packed;
 
+#define CP_CHKSUM_OFFSET	4092	/* default chksum offset in checkpoint */
+#define CP_MIN_CHKSUM_OFFSET						\
+	(offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
+
 /*
  * For orphan inode management
  */
@@ -186,7 +191,7 @@
 struct f2fs_extent {
 	__le32 fofs;		/* start file offset of the extent */
 	__le32 blk;		/* start block address of the extent */
-	__le32 len;		/* lengh of the extent */
+	__le32 len;		/* length of the extent */
 } __packed;
 
 #define F2FS_NAME_LEN		255
@@ -197,11 +202,12 @@
 					get_extra_isize(inode))
 #define DEF_NIDS_PER_INODE	5	/* Node IDs in an Inode */
 #define ADDRS_PER_INODE(inode)	addrs_per_inode(inode)
-#define ADDRS_PER_BLOCK		1018	/* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK	1018	/* Address Pointers in a Direct Block */
+#define ADDRS_PER_BLOCK(inode)	addrs_per_block(inode)
 #define NIDS_PER_BLOCK		1018	/* Node IDs in an Indirect Block */
 
 #define ADDRS_PER_PAGE(page, inode)	\
-	(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
+	(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
 
 #define	NODE_DIR1_BLOCK		(DEF_ADDRS_PER_INODE + 1)
 #define	NODE_DIR2_BLOCK		(DEF_ADDRS_PER_INODE + 2)
@@ -266,7 +272,7 @@
 } __packed;
 
 struct direct_node {
-	__le32 addr[ADDRS_PER_BLOCK];	/* array of data block address */
+	__le32 addr[DEF_ADDRS_PER_BLOCK];	/* array of data block address */
 } __packed;
 
 struct indirect_node {
@@ -284,7 +290,7 @@
 
 struct node_footer {
 	__le32 nid;		/* node id */
-	__le32 ino;		/* inode nunmber */
+	__le32 ino;		/* inode number */
 	__le32 flag;		/* include cold/fsync/dentry marks and offset */
 	__le64 cp_ver;		/* checkpoint version */
 	__le32 next_blkaddr;	/* next node page block address */
@@ -489,12 +495,12 @@
 
 /*
  * space utilization of regular dentry and inline dentry (w/o extra reservation)
- *		regular dentry			inline dentry
- * bitmap	1 * 27 = 27			1 * 23 = 23
- * reserved	1 * 3 = 3			1 * 7 = 7
- * dentry	11 * 214 = 2354			11 * 182 = 2002
- * filename	8 * 214 = 1712			8 * 182 = 1456
- * total	4096				3488
+ *		regular dentry		inline dentry (def)	inline dentry (min)
+ * bitmap	1 * 27 = 27		1 * 23 = 23		1 * 1 = 1
+ * reserved	1 * 3 = 3		1 * 7 = 7		1 * 1 = 1
+ * dentry	11 * 214 = 2354		11 * 182 = 2002		11 * 2 = 22
+ * filename	8 * 214 = 1712		8 * 182 = 1456		8 * 2 = 16
+ * total	4096			3488			40
  *
 * Note: there is more reserved space in an inline dentry than in a regular
 * dentry; when converting an inline dentry we should handle this carefully.
@@ -506,12 +512,13 @@
 #define SIZE_OF_RESERVED	(PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
 				F2FS_SLOT_LEN) * \
 				NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
+#define MIN_INLINE_DENTRY_SIZE		40	/* just include '.' and '..' entries */
 
 /* One directory entry slot representing F2FS_SLOT_LEN-sized file name */
 struct f2fs_dir_entry {
 	__le32 hash_code;	/* hash code of file name */
 	__le32 ino;		/* inode number */
-	__le16 name_len;	/* lengh of file name */
+	__le16 name_len;	/* length of file name */
 	__u8 file_type;		/* file type */
 } __packed;
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5b39660..9ca2121 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -721,7 +721,7 @@
 	struct hlist_head	i_fsnotify_marks;
 #endif
 
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+#ifdef CONFIG_FS_ENCRYPTION
 	struct fscrypt_info	*i_crypt_info;
 #endif
 
@@ -1384,8 +1384,9 @@
 	void                    *s_security;
 #endif
 	const struct xattr_handler **s_xattr;
-
+#ifdef CONFIG_FS_ENCRYPTION
 	const struct fscrypt_operations	*s_cop;
+#endif
 
 	struct hlist_bl_head	s_anon;		/* anonymous dentries for (nfs) exporting */
 	struct list_head	s_mounts;	/* list of mounts; _not_ for fs use */
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index f5371c2..aa5280a 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -1,9 +1,8 @@
 /*
  * fscrypt.h: declarations for per-file encryption
  *
- * Filesystems that implement per-file encryption include this header
- * file with the __FS_HAS_ENCRYPTION set according to whether that filesystem
- * is being built with encryption support or not.
+ * Filesystems that implement per-file encryption must include this header
+ * file.
  *
  * Copyright (C) 2015, Google, Inc.
  *
@@ -14,6 +13,8 @@
 #define _LINUX_FSCRYPT_H
 
 #include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
 
 #define FS_CRYPTO_BLOCK_SIZE		16
 
@@ -43,11 +44,410 @@
 #define fname_name(p)		((p)->disk_name.name)
 #define fname_len(p)		((p)->disk_name.len)
 
-#if __FS_HAS_ENCRYPTION
-#include <linux/fscrypt_supp.h>
-#else
-#include <linux/fscrypt_notsupp.h>
-#endif
+#ifdef CONFIG_FS_ENCRYPTION
+/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+	unsigned int flags;
+	const char *key_prefix;
+	int (*get_context)(struct inode *, void *, size_t);
+	int (*set_context)(struct inode *, const void *, size_t, void *);
+	bool (*dummy_context)(struct inode *);
+	bool (*empty_dir)(struct inode *);
+	unsigned int max_namelen;
+};
+
+struct fscrypt_ctx {
+	union {
+		struct {
+			struct page *bounce_page;	/* Ciphertext page */
+			struct page *control_page;	/* Original page  */
+		} w;
+		struct {
+			struct bio *bio;
+			struct work_struct work;
+		} r;
+		struct list_head free_list;	/* Free list */
+	};
+	u8 flags;				/* Flags */
+};
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+	return (inode->i_crypt_info != NULL);
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+	return inode->i_sb->s_cop->dummy_context &&
+		inode->i_sb->s_cop->dummy_context(inode);
+}
+
+/* crypto.c */
+extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
+extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
+extern void fscrypt_release_ctx(struct fscrypt_ctx *);
+extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
+						unsigned int, unsigned int,
+						u64, gfp_t);
+extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
+				unsigned int, u64);
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+}
+
+extern void fscrypt_restore_control_page(struct page *);
+
+/* policy.c */
+extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
+extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
+extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
+extern int fscrypt_inherit_context(struct inode *, struct inode *,
+					void *, bool);
+/* keyinfo.c */
+extern int fscrypt_get_encryption_info(struct inode *);
+extern void fscrypt_put_encryption_info(struct inode *);
+
+/* fname.c */
+extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
+				int lookup, struct fscrypt_name *);
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+	kfree(fname->crypto_buf.name);
+}
+
+extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
+				struct fscrypt_str *);
+extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
+extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
+			const struct fscrypt_str *, struct fscrypt_str *);
+
+#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE	32
+
+/* Extracts the second-to-last ciphertext block; see explanation below */
+#define FSCRYPT_FNAME_DIGEST(name, len)	\
+	((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
+			     FS_CRYPTO_BLOCK_SIZE))
+
+#define FSCRYPT_FNAME_DIGEST_SIZE	FS_CRYPTO_BLOCK_SIZE
+
+/**
+ * fscrypt_digested_name - alternate identifier for an on-disk filename
+ *
+ * When userspace lists an encrypted directory without access to the key,
+ * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
+ * bytes are shown in this abbreviated form (base64-encoded) rather than as the
+ * full ciphertext (base64-encoded).  This is necessary to allow supporting
+ * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
+ *
+ * To make it possible for filesystems to still find the correct directory entry
+ * despite not knowing the full on-disk name, we encode any filesystem-specific
+ * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
+ * followed by the second-to-last ciphertext block of the filename.  Due to the
+ * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
+ * depends on the full plaintext.  (Note that ciphertext stealing causes the
+ * last two blocks to appear "flipped".)  This makes accidental collisions very
+ * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
+ * share the same filesystem-specific hashes.
+ *
+ * However, this scheme isn't immune to intentional collisions, which can be
+ * created by anyone able to create arbitrary plaintext filenames and view them
+ * without the key.  Making the "digest" be a real cryptographic hash like
+ * SHA-256 over the full ciphertext would prevent this, although it would be
+ * less efficient and harder to implement, especially since the filesystem would
+ * need to calculate it for each directory entry examined during a search.
+ */
+struct fscrypt_digested_name {
+	u32 hash;
+	u32 minor_hash;
+	u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
+};
+
+/**
+ * fscrypt_match_name() - test whether the given name matches a directory entry
+ * @fname: the name being searched for
+ * @de_name: the name from the directory entry
+ * @de_name_len: the length of @de_name in bytes
+ *
+ * Normally @fname->disk_name will be set, and in that case we simply compare
+ * that to the name stored in the directory entry.  The only exception is that
+ * if we don't have the key for an encrypted directory and a filename in it is
+ * very long, then we won't have the full disk_name and we'll instead need to
+ * match against the fscrypt_digested_name.
+ *
+ * Return: %true if the name matches, otherwise %false.
+ */
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+				      const u8 *de_name, u32 de_name_len)
+{
+	if (unlikely(!fname->disk_name.name)) {
+		const struct fscrypt_digested_name *n =
+			(const void *)fname->crypto_buf.name;
+		if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
+			return false;
+		if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
+			return false;
+		return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
+			       n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
+	}
+
+	if (de_name_len != fname->disk_name.len)
+		return false;
+	return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
+/* bio.c */
+extern void fscrypt_decrypt_bio(struct bio *);
+extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+					struct bio *bio);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
+				 unsigned int);
+
+/* hooks.c */
+extern int fscrypt_file_open(struct inode *inode, struct file *filp);
+extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
+extern int __fscrypt_prepare_rename(struct inode *old_dir,
+				    struct dentry *old_dentry,
+				    struct inode *new_dir,
+				    struct dentry *new_dentry,
+				    unsigned int flags);
+extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
+extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
+				     unsigned int max_len,
+				     struct fscrypt_str *disk_link);
+extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+				     unsigned int len,
+				     struct fscrypt_str *disk_link);
+extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+				       unsigned int max_size,
+				       struct delayed_call *done);
+#else  /* !CONFIG_FS_ENCRYPTION */
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+	return false;
+}
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+	return false;
+}
+
+/* crypto.c */
+static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
+{
+}
+
+static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
+						  gfp_t gfp_flags)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
+{
+	return;
+}
+
+static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
+						struct page *page,
+						unsigned int len,
+						unsigned int offs,
+						u64 lblk_num, gfp_t gfp_flags)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_decrypt_page(const struct inode *inode,
+				       struct page *page,
+				       unsigned int len, unsigned int offs,
+				       u64 lblk_num)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+	WARN_ON_ONCE(1);
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void fscrypt_restore_control_page(struct page *page)
+{
+	return;
+}
+
+/* policy.c */
+static inline int fscrypt_ioctl_set_policy(struct file *filp,
+					   const void __user *arg)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_has_permitted_context(struct inode *parent,
+						struct inode *child)
+{
+	return 0;
+}
+
+static inline int fscrypt_inherit_context(struct inode *parent,
+					  struct inode *child,
+					  void *fs_data, bool preload)
+{
+	return -EOPNOTSUPP;
+}
+
+/* keyinfo.c */
+static inline int fscrypt_get_encryption_info(struct inode *inode)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_put_encryption_info(struct inode *inode)
+{
+	return;
+}
+
+/* fname.c */
+static inline int fscrypt_setup_filename(struct inode *dir,
+					 const struct qstr *iname,
+					 int lookup, struct fscrypt_name *fname)
+{
+	if (IS_ENCRYPTED(dir))
+		return -EOPNOTSUPP;
+
+	memset(fname, 0, sizeof(struct fscrypt_name));
+	fname->usr_fname = iname;
+	fname->disk_name.name = (unsigned char *)iname->name;
+	fname->disk_name.len = iname->len;
+	return 0;
+}
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+	return;
+}
+
+static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
+					     u32 max_encrypted_len,
+					     struct fscrypt_str *crypto_str)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+	return;
+}
+
+static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
+					    u32 hash, u32 minor_hash,
+					    const struct fscrypt_str *iname,
+					    struct fscrypt_str *oname)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+				      const u8 *de_name, u32 de_name_len)
+{
+	/* Encryption support disabled; use standard comparison */
+	if (de_name_len != fname->disk_name.len)
+		return false;
+	return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
+/* bio.c */
+static inline void fscrypt_decrypt_bio(struct bio *bio)
+{
+}
+
+static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
+					       struct bio *bio)
+{
+}
+
+static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
+{
+	return;
+}
+
+static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+					sector_t pblk, unsigned int len)
+{
+	return -EOPNOTSUPP;
+}
+
+/* hooks.c */
+
+static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
+{
+	if (IS_ENCRYPTED(inode))
+		return -EOPNOTSUPP;
+	return 0;
+}
+
+static inline int __fscrypt_prepare_link(struct inode *inode,
+					 struct inode *dir)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_rename(struct inode *old_dir,
+					   struct dentry *old_dentry,
+					   struct inode *new_dir,
+					   struct dentry *new_dentry,
+					   unsigned int flags)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_lookup(struct inode *dir,
+					   struct dentry *dentry)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_symlink(struct inode *dir,
+					    unsigned int len,
+					    unsigned int max_len,
+					    struct fscrypt_str *disk_link)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_encrypt_symlink(struct inode *inode,
+					    const char *target,
+					    unsigned int len,
+					    struct fscrypt_str *disk_link)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline const char *fscrypt_get_symlink(struct inode *inode,
+					      const void *caddr,
+					      unsigned int max_size,
+					      struct delayed_call *done)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+#endif	/* !CONFIG_FS_ENCRYPTION */
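
A hedged sketch of the lookup flow the consolidated API supports (the directory-walk loop, de_name/de_name_len, and the found flag are assumptions, not a real filesystem):

    struct fscrypt_name fname;
    bool found = false;
    int err = fscrypt_setup_filename(dir, &dentry->d_name, 1 /* lookup */,
                                     &fname);
    if (err)
            return err;
    /* for each on-disk entry (de_name, de_name_len) in the directory: */
    if (fscrypt_match_name(&fname, de_name, de_name_len))
            found = true;   /* matches even when only a digested name is known */
    fscrypt_free_filename(&fname);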
 
 /**
  * fscrypt_require_key - require an inode's encryption key
@@ -90,7 +490,7 @@
  * in an encrypted directory tree use the same encryption policy.
  *
  * Return: 0 on success, -ENOKEY if the directory's encryption key is missing,
- * -EPERM if the link would result in an inconsistent encryption policy, or
+ * -EXDEV if the link would result in an inconsistent encryption policy, or
  * another -errno code.
  */
 static inline int fscrypt_prepare_link(struct dentry *old_dentry,
@@ -120,7 +520,7 @@
  * We also verify that the rename will not violate the constraint that all files
  * in an encrypted directory tree use the same encryption policy.
  *
- * Return: 0 on success, -ENOKEY if an encryption key is missing, -EPERM if the
+ * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the
  * rename would cause inconsistent encryption policies, or another -errno code.
  */
 static inline int fscrypt_prepare_rename(struct inode *old_dir,
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
deleted file mode 100644
index 5017122..0000000
--- a/include/linux/fscrypt_notsupp.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * fscrypt_notsupp.h
- *
- * This stubs out the fscrypt functions for filesystems configured without
- * encryption support.
- *
- * Do not include this file directly. Use fscrypt.h instead!
- */
-#ifndef _LINUX_FSCRYPT_H
-#error "Incorrect include of linux/fscrypt_notsupp.h!"
-#endif
-
-#ifndef _LINUX_FSCRYPT_NOTSUPP_H
-#define _LINUX_FSCRYPT_NOTSUPP_H
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
-	return false;
-}
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
-	return false;
-}
-
-/* crypto.c */
-static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
-{
-}
-
-static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
-						  gfp_t gfp_flags)
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
-	return;
-}
-
-static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
-						struct page *page,
-						unsigned int len,
-						unsigned int offs,
-						u64 lblk_num, gfp_t gfp_flags)
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline int fscrypt_decrypt_page(const struct inode *inode,
-				       struct page *page,
-				       unsigned int len, unsigned int offs,
-				       u64 lblk_num)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
-	WARN_ON_ONCE(1);
-	return ERR_PTR(-EINVAL);
-}
-
-static inline void fscrypt_restore_control_page(struct page *page)
-{
-	return;
-}
-
-/* policy.c */
-static inline int fscrypt_ioctl_set_policy(struct file *filp,
-					   const void __user *arg)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_has_permitted_context(struct inode *parent,
-						struct inode *child)
-{
-	return 0;
-}
-
-static inline int fscrypt_inherit_context(struct inode *parent,
-					  struct inode *child,
-					  void *fs_data, bool preload)
-{
-	return -EOPNOTSUPP;
-}
-
-/* keyinfo.c */
-static inline int fscrypt_get_encryption_info(struct inode *inode)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_put_encryption_info(struct inode *inode)
-{
-	return;
-}
-
- /* fname.c */
-static inline int fscrypt_setup_filename(struct inode *dir,
-					 const struct qstr *iname,
-					 int lookup, struct fscrypt_name *fname)
-{
-	if (IS_ENCRYPTED(dir))
-		return -EOPNOTSUPP;
-
-	memset(fname, 0, sizeof(struct fscrypt_name));
-	fname->usr_fname = iname;
-	fname->disk_name.name = (unsigned char *)iname->name;
-	fname->disk_name.len = iname->len;
-	return 0;
-}
-
-static inline void fscrypt_free_filename(struct fscrypt_name *fname)
-{
-	return;
-}
-
-static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
-					     u32 max_encrypted_len,
-					     struct fscrypt_str *crypto_str)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
-{
-	return;
-}
-
-static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
-					    u32 hash, u32 minor_hash,
-					    const struct fscrypt_str *iname,
-					    struct fscrypt_str *oname)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
-				      const u8 *de_name, u32 de_name_len)
-{
-	/* Encryption support disabled; use standard comparison */
-	if (de_name_len != fname->disk_name.len)
-		return false;
-	return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
-
-/* bio.c */
-static inline void fscrypt_decrypt_bio(struct bio *bio)
-{
-}
-
-static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
-					       struct bio *bio)
-{
-}
-
-static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	return;
-}
-
-static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
-					sector_t pblk, unsigned int len)
-{
-	return -EOPNOTSUPP;
-}
-
-/* hooks.c */
-static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
-{
-	if (IS_ENCRYPTED(inode))
-		return -EOPNOTSUPP;
-	return 0;
-}
-
-static inline int __fscrypt_prepare_link(struct inode *inode,
-					 struct inode *dir)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_prepare_rename(struct inode *old_dir,
-					   struct dentry *old_dentry,
-					   struct inode *new_dir,
-					   struct dentry *new_dentry,
-					   unsigned int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_prepare_lookup(struct inode *dir,
-					   struct dentry *dentry)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_prepare_symlink(struct inode *dir,
-					    unsigned int len,
-					    unsigned int max_len,
-					    struct fscrypt_str *disk_link)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int __fscrypt_encrypt_symlink(struct inode *inode,
-					    const char *target,
-					    unsigned int len,
-					    struct fscrypt_str *disk_link)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline const char *fscrypt_get_symlink(struct inode *inode,
-					      const void *caddr,
-					      unsigned int max_size,
-					      struct delayed_call *done)
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
-
-#endif	/* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
deleted file mode 100644
index 99b6c52..0000000
--- a/include/linux/fscrypt_supp.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * fscrypt_supp.h
- *
- * Do not include this file directly. Use fscrypt.h instead!
- */
-#ifndef _LINUX_FSCRYPT_H
-#error "Incorrect include of linux/fscrypt_supp.h!"
-#endif
-
-#ifndef _LINUX_FSCRYPT_SUPP_H
-#define _LINUX_FSCRYPT_SUPP_H
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
-/*
- * crypto operations for filesystems
- */
-struct fscrypt_operations {
-	unsigned int flags;
-	const char *key_prefix;
-	int (*get_context)(struct inode *, void *, size_t);
-	int (*set_context)(struct inode *, const void *, size_t, void *);
-	bool (*dummy_context)(struct inode *);
-	bool (*empty_dir)(struct inode *);
-	unsigned int max_namelen;
-};
-
-struct fscrypt_ctx {
-	union {
-		struct {
-			struct page *bounce_page;	/* Ciphertext page */
-			struct page *control_page;	/* Original page  */
-		} w;
-		struct {
-			struct bio *bio;
-			struct work_struct work;
-		} r;
-		struct list_head free_list;	/* Free list */
-	};
-	u8 flags;				/* Flags */
-};
-
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
-{
-	return (inode->i_crypt_info != NULL);
-}
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
-	return inode->i_sb->s_cop->dummy_context &&
-		inode->i_sb->s_cop->dummy_context(inode);
-}
-
-/* crypto.c */
-extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
-extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
-extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
-						unsigned int, unsigned int,
-						u64, gfp_t);
-extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
-				unsigned int, u64);
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
-	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
-}
-
-extern void fscrypt_restore_control_page(struct page *);
-
-/* policy.c */
-extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
-extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
-extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
-extern int fscrypt_inherit_context(struct inode *, struct inode *,
-					void *, bool);
-/* keyinfo.c */
-extern int fscrypt_get_encryption_info(struct inode *);
-extern void fscrypt_put_encryption_info(struct inode *);
-
-/* fname.c */
-extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
-				int lookup, struct fscrypt_name *);
-
-static inline void fscrypt_free_filename(struct fscrypt_name *fname)
-{
-	kfree(fname->crypto_buf.name);
-}
-
-extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
-				struct fscrypt_str *);
-extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
-extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
-			const struct fscrypt_str *, struct fscrypt_str *);
-
-#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE	32
-
-/* Extracts the second-to-last ciphertext block; see explanation below */
-#define FSCRYPT_FNAME_DIGEST(name, len)	\
-	((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
-			     FS_CRYPTO_BLOCK_SIZE))
-
-#define FSCRYPT_FNAME_DIGEST_SIZE	FS_CRYPTO_BLOCK_SIZE
-
-/**
- * fscrypt_digested_name - alternate identifier for an on-disk filename
- *
- * When userspace lists an encrypted directory without access to the key,
- * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
- * bytes are shown in this abbreviated form (base64-encoded) rather than as the
- * full ciphertext (base64-encoded).  This is necessary to allow supporting
- * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
- *
- * To make it possible for filesystems to still find the correct directory entry
- * despite not knowing the full on-disk name, we encode any filesystem-specific
- * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
- * followed by the second-to-last ciphertext block of the filename.  Due to the
- * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
- * depends on the full plaintext.  (Note that ciphertext stealing causes the
- * last two blocks to appear "flipped".)  This makes accidental collisions very
- * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
- * share the same filesystem-specific hashes.
- *
- * However, this scheme isn't immune to intentional collisions, which can be
- * created by anyone able to create arbitrary plaintext filenames and view them
- * without the key.  Making the "digest" be a real cryptographic hash like
- * SHA-256 over the full ciphertext would prevent this, although it would be
- * less efficient and harder to implement, especially since the filesystem would
- * need to calculate it for each directory entry examined during a search.
- */
-struct fscrypt_digested_name {
-	u32 hash;
-	u32 minor_hash;
-	u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
-};
-
-/**
- * fscrypt_match_name() - test whether the given name matches a directory entry
- * @fname: the name being searched for
- * @de_name: the name from the directory entry
- * @de_name_len: the length of @de_name in bytes
- *
- * Normally @fname->disk_name will be set, and in that case we simply compare
- * that to the name stored in the directory entry.  The only exception is that
- * if we don't have the key for an encrypted directory and a filename in it is
- * very long, then we won't have the full disk_name and we'll instead need to
- * match against the fscrypt_digested_name.
- *
- * Return: %true if the name matches, otherwise %false.
- */
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
-				      const u8 *de_name, u32 de_name_len)
-{
-	if (unlikely(!fname->disk_name.name)) {
-		const struct fscrypt_digested_name *n =
-			(const void *)fname->crypto_buf.name;
-		if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
-			return false;
-		if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
-			return false;
-		return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
-			       n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
-	}
-
-	if (de_name_len != fname->disk_name.len)
-		return false;
-	return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
-
-/* bio.c */
-extern void fscrypt_decrypt_bio(struct bio *);
-extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
-					struct bio *bio);
-extern void fscrypt_pullback_bio_page(struct page **, bool);
-extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
-				 unsigned int);
-
-/* hooks.c */
-extern int fscrypt_file_open(struct inode *inode, struct file *filp);
-extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
-extern int __fscrypt_prepare_rename(struct inode *old_dir,
-				    struct dentry *old_dentry,
-				    struct inode *new_dir,
-				    struct dentry *new_dentry,
-				    unsigned int flags);
-extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
-extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
-				     unsigned int max_len,
-				     struct fscrypt_str *disk_link);
-extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
-				     unsigned int len,
-				     struct fscrypt_str *disk_link);
-extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
-				       unsigned int max_size,
-				       struct delayed_call *done);
-
-#endif	/* _LINUX_FSCRYPT_SUPP_H */
diff --git a/include/linux/input/cyttsp5_core.h b/include/linux/input/cyttsp5_core.h
new file mode 100644
index 0000000..04e5199
--- /dev/null
+++ b/include/linux/input/cyttsp5_core.h
@@ -0,0 +1,179 @@
+/*
+ * cyttsp5_core.h
+ * Parade TrueTouch(TM) Standard Product V5 Core Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015 Parade Semiconductor
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Semiconductor at www.parade.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#ifndef _LINUX_CYTTSP5_CORE_H
+#define _LINUX_CYTTSP5_CORE_H
+
+#include <linux/stringify.h>
+
+#define CYTTSP5_I2C_NAME "cyttsp5_i2c_adapter"
+#define CYTTSP5_SPI_NAME "cyttsp5_spi_adapter"
+
+#define CYTTSP5_CORE_NAME "cyttsp5_core"
+#define CYTTSP5_MT_NAME "cyttsp5_mt"
+#define CYTTSP5_BTN_NAME "cyttsp5_btn"
+#define CYTTSP5_PROXIMITY_NAME "cyttsp5_proximity"
+
+#define CY_DRIVER_NAME TTDA
+#define CY_DRIVER_MAJOR 03
+#define CY_DRIVER_MINOR 08
+
+#define CY_DRIVER_REVCTRL 874312
+
+#define CY_DRIVER_VERSION		    \
+__stringify(CY_DRIVER_NAME)		    \
+"." __stringify(CY_DRIVER_MAJOR)	    \
+"." __stringify(CY_DRIVER_MINOR)	    \
+"." __stringify(CY_DRIVER_REVCTRL)
+
+#define CY_DRIVER_DATE "20170125"
+
+/* abs settings */
+#define CY_IGNORE_VALUE             -1
+
+enum cyttsp5_core_platform_flags {
+	CY_CORE_FLAG_NONE,
+	CY_CORE_FLAG_POWEROFF_ON_SLEEP = 0x02,
+	CY_CORE_FLAG_RESTORE_PARAMETERS = 0x04,
+};
+
+enum cyttsp5_core_platform_easy_wakeup_gesture {
+	CY_CORE_EWG_NONE,
+	CY_CORE_EWG_TAP_TAP,
+	CY_CORE_EWG_TWO_FINGER_SLIDE,
+	CY_CORE_EWG_RESERVED,
+	CY_CORE_EWG_WAKE_ON_INT_FROM_HOST = 0xFF,
+};
+
+enum cyttsp5_loader_platform_flags {
+	CY_LOADER_FLAG_NONE,
+	CY_LOADER_FLAG_CALIBRATE_AFTER_FW_UPGRADE,
+	/* Use CONFIG_VER field in TT_CFG to decide TT_CFG update */
+	CY_LOADER_FLAG_CHECK_TTCONFIG_VERSION,
+	CY_LOADER_FLAG_CALIBRATE_AFTER_TTCONFIG_UPGRADE,
+};
+
+struct touch_settings {
+	const uint8_t   *data;
+	uint32_t         size;
+	uint8_t         tag;
+};
+
+struct cyttsp5_touch_firmware {
+	const uint8_t *img;
+	uint32_t size;
+	const uint8_t *ver;
+	uint8_t vsize;
+	uint8_t panel_id;
+};
+
+struct cyttsp5_touch_config {
+	struct touch_settings *param_regs;
+	struct touch_settings *param_size;
+	const uint8_t *fw_ver;
+	uint8_t fw_vsize;
+	uint8_t panel_id;
+};
+
+struct cyttsp5_loader_platform_data {
+	struct cyttsp5_touch_firmware *fw;
+	struct cyttsp5_touch_config *ttconfig;
+	struct cyttsp5_touch_firmware **fws;
+	struct cyttsp5_touch_config **ttconfigs;
+	u32 flags;
+};
+
+typedef int (*cyttsp5_platform_read) (struct device *dev, void *buf, int size);
+
+#define CY_TOUCH_SETTINGS_MAX 32
+
+struct cyttsp5_core_platform_data {
+	int irq_gpio;
+	int rst_gpio;
+	int level_irq_udelay;
+	u16 hid_desc_register;
+	u16 vendor_id;
+	u16 product_id;
+
+	int (*xres)(struct cyttsp5_core_platform_data *pdata,
+		struct device *dev);
+	int (*init)(struct cyttsp5_core_platform_data *pdata,
+		int on, struct device *dev);
+	int (*power)(struct cyttsp5_core_platform_data *pdata,
+		int on, struct device *dev, atomic_t *ignore_irq);
+	int (*detect)(struct cyttsp5_core_platform_data *pdata,
+		struct device *dev, cyttsp5_platform_read read);
+	int (*irq_stat)(struct cyttsp5_core_platform_data *pdata,
+		struct device *dev);
+	struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX];
+	u32 flags;
+	u8 easy_wakeup_gesture;
+};
+
+struct touch_framework {
+	const int16_t  *abs;
+	uint8_t         size;
+	uint8_t         enable_vkeys;
+} __packed;
+
+enum cyttsp5_mt_platform_flags {
+	CY_MT_FLAG_NONE,
+	CY_MT_FLAG_HOVER = 0x04,
+	CY_MT_FLAG_FLIP = 0x08,
+	CY_MT_FLAG_INV_X = 0x10,
+	CY_MT_FLAG_INV_Y = 0x20,
+	CY_MT_FLAG_VKEYS = 0x40,
+	CY_MT_FLAG_NO_TOUCH_ON_LO = 0x80,
+};
+
+struct cyttsp5_mt_platform_data {
+	struct touch_framework *frmwrk;
+	unsigned short flags;
+	char const *inp_dev_name;
+	int vkeys_x;
+	int vkeys_y;
+};
+
+struct cyttsp5_btn_platform_data {
+	char const *inp_dev_name;
+};
+
+struct cyttsp5_proximity_platform_data {
+	struct touch_framework *frmwrk;
+	char const *inp_dev_name;
+};
+
+struct cyttsp5_platform_data {
+	struct cyttsp5_core_platform_data *core_pdata;
+	struct cyttsp5_mt_platform_data *mt_pdata;
+	struct cyttsp5_btn_platform_data *btn_pdata;
+	struct cyttsp5_proximity_platform_data *prox_pdata;
+	struct cyttsp5_loader_platform_data *loader_pdata;
+};
+
+#endif /* _LINUX_CYTTSP5_CORE_H */
diff --git a/include/linux/input/cyttsp5_device_access-api.h b/include/linux/input/cyttsp5_device_access-api.h
new file mode 100644
index 0000000..c071e52
--- /dev/null
+++ b/include/linux/input/cyttsp5_device_access-api.h
@@ -0,0 +1,44 @@
+/*
+ * cyttsp5_device_access-api.h
+ * Parade TrueTouch(TM) Standard Product V5 Device Access API module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015 Parade Semiconductor
+ * Copyright (C) 2012-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Semiconductor at www.parade.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#ifndef _LINUX_CYTTSP5_DEVICE_ACCESS_API_H
+#define _LINUX_CYTTSP5_DEVICE_ACCESS_API_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+int cyttsp5_device_access_user_command(const char *core_name, u16 read_len,
+		u8 *read_buf, u16 write_len, u8 *write_buf,
+		u16 *actual_read_len);
+
+int cyttsp5_device_access_user_command_async(const char *core_name,
+		u16 read_len, u8 *read_buf, u16 write_len, u8 *write_buf,
+		void (*cont)(const char *core_name, u16 read_len, u8 *read_buf,
+			u16 write_len, u8 *write_buf, u16 actual_read_length,
+			int rc));
+#endif /* _LINUX_CYTTSP5_DEVICE_ACCESS_API_H */
diff --git a/include/linux/input/cyttsp5_platform.h b/include/linux/input/cyttsp5_platform.h
new file mode 100644
index 0000000..9469c4a
--- /dev/null
+++ b/include/linux/input/cyttsp5_platform.h
@@ -0,0 +1,60 @@
+/*
+ * cyttsp5_platform.h
+ * Parade TrueTouch(TM) Standard Product V5 Platform Module.
+ * For use with Parade touchscreen controllers.
+ * Supported parts include:
+ * CYTMA5XX
+ * CYTMA448
+ * CYTMA445A
+ * CYTT21XXX
+ * CYTT31XXX
+ *
+ * Copyright (C) 2015 Parade Semiconductor
+ * Copyright (C) 2013-2015 Cypress Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Contact Parade Semiconductor at www.parade.com <ttdrivers@paradetech.com>
+ *
+ */
+
+#ifndef _LINUX_CYTTSP5_PLATFORM_H
+#define _LINUX_CYTTSP5_PLATFORM_H
+
+#include <linux/input/cyttsp5_core.h>
+
+#if defined(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5) \
+	|| defined(CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5_MODULE)
+extern struct cyttsp5_loader_platform_data _cyttsp5_loader_platform_data;
+
+int cyttsp5_xres(struct cyttsp5_core_platform_data *pdata, struct device *dev);
+int cyttsp5_init(struct cyttsp5_core_platform_data *pdata, int on,
+		struct device *dev);
+int cyttsp5_power(struct cyttsp5_core_platform_data *pdata, int on,
+		struct device *dev, atomic_t *ignore_irq);
+#ifdef CYTTSP5_DETECT_HW
+int cyttsp5_detect(struct cyttsp5_core_platform_data *pdata,
+		struct device *dev, cyttsp5_platform_read read);
+#else
+#define cyttsp5_detect		NULL
+#endif
+int cyttsp5_irq_stat(struct cyttsp5_core_platform_data *pdata,
+		struct device *dev);
+#else /* !CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5 */
+static struct cyttsp5_loader_platform_data _cyttsp5_loader_platform_data;
+#define cyttsp5_xres		NULL
+#define cyttsp5_init		NULL
+#define cyttsp5_power		NULL
+#define cyttsp5_irq_stat	NULL
+#define cyttsp5_detect		NULL
+#endif /* CONFIG_TOUCHSCREEN_CYPRESS_CYTTSP5 */
+
+#endif /* _LINUX_CYTTSP5_PLATFORM_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index c9be579..bb5547a8 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -61,6 +61,7 @@
 	unsigned int		core_internal_state__do_not_mess_with_it;
 	unsigned int		depth;		/* nested irq disables */
 	unsigned int		wake_depth;	/* nested wake enables */
+	unsigned int		tot_count;
 	unsigned int		irq_count;	/* For detecting broken IRQs */
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int		irqs_unhandled;
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b37afd1..f40dfbd 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -7,6 +7,7 @@
 struct kmem_cache;
 struct page;
 struct vm_struct;
+struct task_struct;
 
 #ifdef CONFIG_KASAN
 
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 61054f1..d83fc66 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -55,8 +55,8 @@
 
 #define u64_to_user_ptr(x) (		\
 {					\
-	typecheck(u64, x);		\
-	(void __user *)(uintptr_t)x;	\
+	typecheck(u64, (x));		\
+	(void __user *)(uintptr_t)(x);	\
 }					\
 )
 
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 7056238..44e5293 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -24,6 +24,7 @@
 struct vm_area_struct;
 struct super_block;
 struct file_system_type;
+struct poll_table_struct;
 
 struct kernfs_open_node;
 struct kernfs_iattrs;
@@ -46,6 +47,7 @@
 	KERNFS_SUICIDAL		= 0x0400,
 	KERNFS_SUICIDED		= 0x0800,
 	KERNFS_EMPTY_DIR	= 0x1000,
+	KERNFS_HAS_RELEASE	= 0x2000,
 };
 
 /* @flags for kernfs_create_root() */
@@ -175,6 +177,7 @@
 	/* published fields */
 	struct kernfs_node	*kn;
 	struct file		*file;
+	struct seq_file		*seq_file;
 	void			*priv;
 
 	/* private fields, do not use outside kernfs proper */
@@ -186,11 +189,19 @@
 
 	size_t			atomic_write_len;
 	bool			mmapped;
+	bool			released:1;
 	const struct vm_operations_struct *vm_ops;
 };
 
 struct kernfs_ops {
 	/*
+	 * Optional open/release methods.  Both are called with
+	 * @of->seq_file populated.
+	 */
+	int (*open)(struct kernfs_open_file *of);
+	void (*release)(struct kernfs_open_file *of);
+
+	/*
 	 * Read is handled by either seq_file or raw_read().
 	 *
 	 * If seq_show() is present, seq_file path is active.  Other seq
@@ -228,6 +239,9 @@
 	ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
 			 loff_t off);
 
+	unsigned int (*poll)(struct kernfs_open_file *of,
+			     struct poll_table_struct *pt);
+
 	int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -315,6 +329,8 @@
 int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
 		     const char *new_name, const void *new_ns);
 int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
+unsigned int kernfs_generic_poll(struct kernfs_open_file *of,
+				 struct poll_table_struct *pt);
 void kernfs_notify(struct kernfs_node *kn);
 
 const void *kernfs_super_ns(struct super_block *sb);
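
A minimal sketch of a kernfs user wiring up the new hooks (my_show and the op names are hypothetical; kernfs_generic_poll is the default helper declared above):

    static unsigned int my_poll(struct kernfs_open_file *of,
                                struct poll_table_struct *pt)
    {
            /* add custom readiness checks here, then fall back */
            return kernfs_generic_poll(of, pt);
    }

    static struct kernfs_ops my_ops = {
            .seq_show = my_show,    /* hypothetical seq_file show handler */
            .poll     = my_poll,
    };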
diff --git a/include/linux/keychord.h b/include/linux/keychord.h
deleted file mode 100644
index 08cf540..0000000
--- a/include/linux/keychord.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- *  Key chord input driver
- *
- * Copyright (C) 2008 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
-*/
-
-#ifndef __LINUX_KEYCHORD_H_
-#define __LINUX_KEYCHORD_H_
-
-#include <uapi/linux/keychord.h>
-
-#endif	/* __LINUX_KEYCHORD_H_ */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e233925..cb527c7 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -197,6 +197,7 @@
 	struct kretprobe *rp;
 	kprobe_opcode_t *ret_addr;
 	struct task_struct *task;
+	void *fp;
 	char data[0];
 };
 
diff --git a/include/linux/list.h b/include/linux/list.h
index d1039ec..3ef3ade 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -284,6 +284,36 @@
 		__list_cut_position(list, head, entry);
 }
 
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list.  You should pass
+ * in @entry an element you know is on @head.  @list should
+ * be an empty list or a list you do not care about losing
+ * its data.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+				   struct list_head *head,
+				   struct list_head *entry)
+{
+	if (head->next == entry) {
+		INIT_LIST_HEAD(list);
+		return;
+	}
+	list->next = head->next;
+	list->next->prev = list;
+	list->prev = entry->prev;
+	list->prev->next = list;
+	head->next = entry;
+	entry->prev = head;
+}
+
 static inline void __list_splice(const struct list_head *list,
 				 struct list_head *prev,
 				 struct list_head *next)
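
Usage sketch for the new helper (the queue list and the way entry is obtained are assumptions):

    LIST_HEAD(done);
    struct list_head *entry = queue.next->next;     /* known to be on &queue */

    list_cut_before(&done, &queue, entry);
    /* &done now holds the initial entries; *entry is the first element
     * left on &queue. */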
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index b01fe10..87ff4f5 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -29,6 +29,11 @@
 	((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
 
 #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_nulls_entry_safe(ptr, type, member) \
+	({ typeof(ptr) ____ptr = (ptr); \
+	   !is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \
+	})
 /**
  * ptr_is_a_nulls - Test if a ptr is a nulls
  * @ptr: ptr to be tested
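
Usage sketch for hlist_nulls_entry_safe() (struct my_obj and its node member are hypothetical):

    struct my_obj {
            struct hlist_nulls_node node;
            int val;
    };

    struct my_obj *obj = hlist_nulls_entry_safe(head->first,
                                                struct my_obj, node);
    if (!obj)
            return;         /* first pointer was a nulls marker: list empty */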
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index 5d42859..844fc29 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -215,9 +215,9 @@
 
 /* DA9063 Configuration registers */
 /* OTP */
-#define	DA9063_REG_OPT_COUNT		0x101
-#define	DA9063_REG_OPT_ADDR		0x102
-#define	DA9063_REG_OPT_DATA		0x103
+#define	DA9063_REG_OTP_CONT		0x101
+#define	DA9063_REG_OTP_ADDR		0x102
+#define	DA9063_REG_OTP_DATA		0x103
 
 /* Customer Trim and Configuration */
 #define	DA9063_REG_T_OFFSET		0x104
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index 3ca0af07..0a68dc8 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -136,8 +136,8 @@
 #define MAX77620_FPS_PERIOD_MIN_US		40
 #define MAX20024_FPS_PERIOD_MIN_US		20
 
-#define MAX77620_FPS_PERIOD_MAX_US		2560
-#define MAX20024_FPS_PERIOD_MAX_US		5120
+#define MAX20024_FPS_PERIOD_MAX_US		2560
+#define MAX77620_FPS_PERIOD_MAX_US		5120
 
 #define MAX77620_REG_FPS_GPIO1			0x54
 #define MAX77620_REG_FPS_GPIO2			0x55
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 859fd20..509e990 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -578,6 +578,8 @@
 };
 
 struct mlx5_td {
+	/* protects tirs list changes while tirs refresh */
+	struct mutex     list_lock;
 	struct list_head tirs_list;
 	u32              tdn;
 };
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1be499d..309f165 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -796,6 +796,15 @@
 		get_zone_device_page(page);
 }
 
+static inline __must_check bool try_get_page(struct page *page)
+{
+	page = compound_head(page);
+	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+		return false;
+	page_ref_inc(page);
+	return true;
+}
+
 static inline void put_page(struct page *page)
 {
 	page = compound_head(page);
@@ -1872,6 +1881,8 @@
 	return ptl;
 }
 
+extern void __init pagecache_init(void);
+
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
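
Unlike get_page(), the new try_get_page() can fail and the caller must check; a sketch of the intended pattern (the surrounding lookup is assumed):

    if (!try_get_page(page)) {
            /* refcount already hit zero: the page is being freed,
             * so back off instead of resurrecting it */
            return NULL;
    }
    /* safe to use page; drop the reference with put_page() when done */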
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 57203c7..e717b8d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -161,6 +161,7 @@
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	WORKINGSET_REFAULT,
 	WORKINGSET_ACTIVATE,
+	WORKINGSET_RESTORE,
 	WORKINGSET_NODERECLAIM,
 	NR_ANON_MAPPED,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
diff --git a/include/linux/module.h b/include/linux/module.h
index 9d6fd1d..1dbe349 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -673,6 +673,23 @@
 	return false;
 }
 
+static inline bool within_module_core(unsigned long addr,
+				      const struct module *mod)
+{
+	return false;
+}
+
+static inline bool within_module_init(unsigned long addr,
+				      const struct module *mod)
+{
+	return false;
+}
+
+static inline bool within_module(unsigned long addr, const struct module *mod)
+{
+	return false;
+}
+
 /* Get/put a kernel symbol (calls should be symmetric) */
 #define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
 #define symbol_put(x) do { } while (0)
diff --git a/include/linux/of.h b/include/linux/of.h
index 02f5a31..b30c6d9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -148,16 +148,20 @@
 #ifdef CONFIG_OF
 void of_core_init(void);
 
-static inline bool is_of_node(struct fwnode_handle *fwnode)
+static inline bool is_of_node(const struct fwnode_handle *fwnode)
 {
 	return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
 }
 
-static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
-{
-	return is_of_node(fwnode) ?
-		container_of(fwnode, struct device_node, fwnode) : NULL;
-}
+#define to_of_node(__fwnode)						\
+	({								\
+		typeof(__fwnode) __to_of_node_fwnode = (__fwnode);	\
+									\
+		is_of_node(__to_of_node_fwnode) ?			\
+			container_of(__to_of_node_fwnode,		\
+				     struct device_node, fwnode) :	\
+			NULL;						\
+	})
 
 static inline bool of_have_populated_dt(void)
 {
@@ -216,8 +220,8 @@
 static inline u64 of_read_number(const __be32 *cell, int size)
 {
 	u64 r = 0;
-	while (size--)
-		r = (r << 32) | be32_to_cpu(*(cell++));
+	for (; size--; cell++)
+		r = (r << 32) | be32_to_cpu(*cell);
 	return r;
 }
 
@@ -529,12 +533,12 @@
 {
 }
 
-static inline bool is_of_node(struct fwnode_handle *fwnode)
+static inline bool is_of_node(const struct fwnode_handle *fwnode)
 {
 	return false;
 }
 
-static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
+static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode)
 {
 	return NULL;
 }
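
Behavior is unchanged by the loop rewrite in of_read_number(); for reference, a worked example of what it computes:

    __be32 cells[2] = { cpu_to_be32(0x1), cpu_to_be32(0x2000) };
    u64 v = of_read_number(cells, 2);   /* v == 0x0000000100002000 */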
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 74e4dda..4077d60 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -73,12 +73,14 @@
  */
 enum pageflags {
 	PG_locked,		/* Page is locked. Don't touch. */
-	PG_error,
+	PG_waiters,		/* Page has waiters, check its waitqueue */
 	PG_referenced,
 	PG_uptodate,
 	PG_dirty,
 	PG_lru,
 	PG_active,
+	PG_workingset,
+	PG_error,
 	PG_slab,
 	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use*/
 	PG_arch_1,
@@ -167,6 +169,9 @@
  *     for compound page all operations related to the page flag applied to
  *     head page.
  *
+ * PF_ONLY_HEAD:
+ *     for compound page, callers only ever operate on the head page.
+ *
  * PF_NO_TAIL:
  *     modifications of the page flag must be done on small or head pages,
  *     checks can be done on tail pages too.
@@ -176,6 +181,9 @@
  */
 #define PF_ANY(page, enforce)	page
 #define PF_HEAD(page, enforce)	compound_head(page)
+#define PF_ONLY_HEAD(page, enforce) ({					\
+		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
+		page;})
 #define PF_NO_TAIL(page, enforce) ({					\
 		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
 		compound_head(page);})
@@ -253,6 +261,7 @@
 	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
 
 __PAGEFLAG(Locked, locked, PF_NO_TAIL)
+PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
 PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
 PAGEFLAG(Referenced, referenced, PF_HEAD)
 	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
@@ -262,6 +271,8 @@
 PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
 PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
 	TESTCLEARFLAG(Active, active, PF_HEAD)
+PAGEFLAG(Workingset, workingset, PF_HEAD)
+	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
 __PAGEFLAG(Slab, slab, PF_NO_TAIL)
 __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
 PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */
@@ -735,6 +746,7 @@
 
 #undef PF_ANY
 #undef PF_HEAD
+#undef PF_ONLY_HEAD
 #undef PF_NO_TAIL
 #undef PF_NO_COMPOUND
 #endif /* !__GENERATING_BOUNDS_H */
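
PF_ONLY_HEAD differs from PF_HEAD in that it rejects tail pages outright instead of redirecting to the head, since PG_waiters is only ever set in the head page's flags word. A heavily simplified userspace sketch of the policy-macro pattern (the struct layout and helpers are invented for the example):

#include <assert.h>
#include <stdio.h>

struct page { unsigned long flags; struct page *head; };

enum { PG_locked, PG_waiters, PG_active };

static int PageTail(struct page *p) { return p->head != p; }
static struct page *compound_head(struct page *p) { return p->head; }

/* The policy argument decides which struct page the generated helper
 * actually touches; PF_ONLY_HEAD asserts the caller got it right. */
#define PF_HEAD(page)      compound_head(page)
#define PF_ONLY_HEAD(page) ({ assert(!PageTail(page)); (page); })

#define TESTPAGEFLAG(uname, lname, policy)			\
static int Page##uname(struct page *page)			\
{								\
	return !!(policy(page)->flags & (1UL << PG_##lname));	\
}

TESTPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
TESTPAGEFLAG(Active, active, PF_HEAD)

int main(void)
{
	struct page head, tail;

	head.flags = (1UL << PG_waiters) | (1UL << PG_active);
	head.head = &head;			/* head points to itself */
	tail.flags = 0;
	tail.head = &head;			/* tail page of the compound */

	printf("%d\n", PageWaiters(&head));	/* 1: allowed on the head */
	printf("%d\n", PageActive(&tail));	/* 1: redirected to the head */
	return 0;
}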
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c890724a..0dfc605 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -238,6 +238,7 @@
 #define FGP_WRITE		0x00000008
 #define FGP_NOFS		0x00000010
 #define FGP_NOWAIT		0x00000020
+#define FGP_FOR_MMAP		0x00000040
 
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
 		int fgp_flags, gfp_t cache_gfp_mask);
@@ -494,22 +495,14 @@
  * and for filesystems which need to wait on PG_private.
  */
 extern void wait_on_page_bit(struct page *page, int bit_nr);
-
 extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable_timeout(struct page *page,
-					     int bit_nr, unsigned long timeout);
+extern void wake_up_page_bit(struct page *page, int bit_nr);
 
-static inline int wait_on_page_locked_killable(struct page *page)
-{
-	if (!PageLocked(page))
-		return 0;
-	return wait_on_page_bit_killable(compound_head(page), PG_locked);
-}
-
-extern wait_queue_head_t *page_waitqueue(struct page *page);
 static inline void wake_up_page(struct page *page, int bit)
 {
-	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
+	if (!PageWaiters(page))
+		return;
+	wake_up_page_bit(page, bit);
 }
 
 /* 
@@ -525,6 +518,13 @@
 		wait_on_page_bit(compound_head(page), PG_locked);
 }
 
+static inline int wait_on_page_locked_killable(struct page *page)
+{
+	if (!PageLocked(page))
+		return 0;
+	return wait_on_page_bit_killable(compound_head(page), PG_locked);
+}
+
 /* 
  * Wait for a page to complete writeback
  */
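
The reworked wake_up_page() is where the new PG_waiters bit pays off: a waker skips the hashed-waitqueue lookup entirely unless a sleeper has first advertised itself by setting the bit. A userspace sketch of the idea, with an atomic flag standing in for the page bit:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int waiters;	/* plays the role of PG_waiters */

static void expensive_wake(void) { puts("walk hashed waitqueue"); }

static void wake_up_bit_fast(void)
{
	if (!atomic_load(&waiters))	/* common case: nobody waits */
		return;
	expensive_wake();
}

int main(void)
{
	wake_up_bit_fast();		/* no output: fast path */
	atomic_store(&waiters, 1);	/* a sleeper announces itself */
	wake_up_bit_fast();		/* now the slow path runs */
	return 0;
}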
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 534cb43..b9ac0ba 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -320,6 +320,8 @@
 	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
 						      controlled exclusively by
 						      user sysfs */
+	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
+						   bit manually */
 	unsigned int	d3_delay;	/* D3->D0 transition time in ms */
 	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */
 
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index e7497c9..4f71293 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -182,6 +182,7 @@
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
 
 extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
 
diff --git a/include/linux/property.h b/include/linux/property.h
index 459337f..d5c7ebd 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -233,7 +233,7 @@
 #define PROPERTY_ENTRY_STRING(_name_, _val_)		\
 (struct property_entry) {				\
 	.name = _name_,					\
-	.length = sizeof(_val_),			\
+	.length = sizeof(const char *),			\
 	.is_string = true,				\
 	{ .value = { .str = _val_ } },			\
 }
diff --git a/include/linux/psi.h b/include/linux/psi.h
new file mode 100644
index 0000000..b825fa5
--- /dev/null
+++ b/include/linux/psi.h
@@ -0,0 +1,63 @@
+#ifndef _LINUX_PSI_H
+#define _LINUX_PSI_H
+
+#include <linux/jump_label.h>
+#include <linux/psi_types.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/cgroup-defs.h>
+
+struct seq_file;
+struct css_set;
+
+#ifdef CONFIG_PSI
+
+extern struct static_key_false psi_disabled;
+
+void psi_init(void);
+
+void psi_task_change(struct task_struct *task, int clear, int set);
+
+void psi_memstall_tick(struct task_struct *task, int cpu);
+void psi_memstall_enter(unsigned long *flags);
+void psi_memstall_leave(unsigned long *flags);
+
+int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
+
+#ifdef CONFIG_CGROUPS
+int psi_cgroup_alloc(struct cgroup *cgrp);
+void psi_cgroup_free(struct cgroup *cgrp);
+void cgroup_move_task(struct task_struct *p, struct css_set *to);
+
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+			char *buf, size_t nbytes, enum psi_res res);
+void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
+
+unsigned int psi_trigger_poll(void **trigger_ptr, struct file *file,
+			      poll_table *wait);
+#endif
+
+#else /* CONFIG_PSI */
+
+static inline void psi_init(void) {}
+
+static inline void psi_memstall_enter(unsigned long *flags) {}
+static inline void psi_memstall_leave(unsigned long *flags) {}
+
+#ifdef CONFIG_CGROUPS
+static inline int psi_cgroup_alloc(struct cgroup *cgrp)
+{
+	return 0;
+}
+static inline void psi_cgroup_free(struct cgroup *cgrp)
+{
+}
+static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
+{
+	rcu_assign_pointer(p->cgroups, to);
+}
+#endif
+
+#endif /* CONFIG_PSI */
+
+#endif /* _LINUX_PSI_H */
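
With CONFIG_PSI enabled, the aggregates behind these declarations surface in /proc/pressure/ in the format shown in Documentation/accounting/psi.txt. A minimal userspace consumer of the memory file:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/pressure/memory", "r");
	float avg10, avg60, avg300;
	unsigned long long total;

	if (!f)
		return 1;	/* CONFIG_PSI off, or psi=0 on the cmdline */
	if (fscanf(f, "some avg10=%f avg60=%f avg300=%f total=%llu",
		   &avg10, &avg60, &avg300, &total) == 4)
		printf("memory some: %.2f%% (10s avg), %llu us total\n",
		       avg10, total);
	fclose(f);
	return 0;
}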
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
new file mode 100644
index 0000000..07aaf9b
--- /dev/null
+++ b/include/linux/psi_types.h
@@ -0,0 +1,173 @@
+#ifndef _LINUX_PSI_TYPES_H
+#define _LINUX_PSI_TYPES_H
+
+#include <linux/kthread.h>
+#include <linux/seqlock.h>
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/wait.h>
+
+#ifdef CONFIG_PSI
+
+/* Tracked task states */
+enum psi_task_count {
+	NR_IOWAIT,
+	NR_MEMSTALL,
+	NR_RUNNING,
+	NR_PSI_TASK_COUNTS = 3,
+};
+
+/* Task state bitmasks */
+#define TSK_IOWAIT	(1 << NR_IOWAIT)
+#define TSK_MEMSTALL	(1 << NR_MEMSTALL)
+#define TSK_RUNNING	(1 << NR_RUNNING)
+
+/* Resources that workloads could be stalled on */
+enum psi_res {
+	PSI_IO,
+	PSI_MEM,
+	PSI_CPU,
+	NR_PSI_RESOURCES = 3,
+};
+
+/*
+ * Pressure states for each resource:
+ *
+ * SOME: Stalled tasks & working tasks
+ * FULL: Stalled tasks & no working tasks
+ */
+enum psi_states {
+	PSI_IO_SOME,
+	PSI_IO_FULL,
+	PSI_MEM_SOME,
+	PSI_MEM_FULL,
+	PSI_CPU_SOME,
+	/* Only per-CPU, to weigh the CPU in the global average: */
+	PSI_NONIDLE,
+	NR_PSI_STATES = 6,
+};
+
+enum psi_aggregators {
+	PSI_AVGS = 0,
+	PSI_POLL,
+	NR_PSI_AGGREGATORS,
+};
+
+struct psi_group_cpu {
+	/* 1st cacheline updated by the scheduler */
+
+	/* Aggregator needs to know of concurrent changes */
+	seqcount_t seq ____cacheline_aligned_in_smp;
+
+	/* States of the tasks belonging to this group */
+	unsigned int tasks[NR_PSI_TASK_COUNTS];
+
+	/* Aggregate pressure state derived from the tasks */
+	u32 state_mask;
+
+	/* Period time sampling buckets for each state of interest (ns) */
+	u32 times[NR_PSI_STATES];
+
+	/* Time of last task change in this group (rq_clock) */
+	u64 state_start;
+
+	/* 2nd cacheline updated by the aggregator */
+
+	/* Delta detection against the sampling buckets */
+	u32 times_prev[NR_PSI_AGGREGATORS][NR_PSI_STATES]
+			____cacheline_aligned_in_smp;
+};
+
+/* PSI growth tracking window */
+struct psi_window {
+	/* Window size in ns */
+	u64 size;
+
+	/* Start time of the current window in ns */
+	u64 start_time;
+
+	/* Value at the start of the window */
+	u64 start_value;
+
+	/* Value growth in the previous window */
+	u64 prev_growth;
+};
+
+struct psi_trigger {
+	/* PSI state being monitored by the trigger */
+	enum psi_states state;
+
+	/* User-specified threshold in ns */
+	u64 threshold;
+
+	/* List node inside triggers list */
+	struct list_head node;
+
+	/* Backpointer needed during trigger destruction */
+	struct psi_group *group;
+
+	/* Wait queue for polling */
+	wait_queue_head_t event_wait;
+
+	/* Pending event flag */
+	int event;
+
+	/* Tracking window */
+	struct psi_window win;
+
+	/*
+	 * Time last event was generated. Used for rate-limiting
+	 * events to one per window
+	 */
+	u64 last_event_time;
+
+	/* Refcounting to prevent premature destruction */
+	struct kref refcount;
+};
+
+struct psi_group {
+	/* Protects data used by the aggregator */
+	struct mutex avgs_lock;
+
+	/* Per-cpu task state & time tracking */
+	struct psi_group_cpu __percpu *pcpu;
+
+	/* Running pressure averages */
+	u64 avg_total[NR_PSI_STATES - 1];
+	u64 avg_last_update;
+	u64 avg_next_update;
+
+	/* Aggregator work control */
+	struct delayed_work avgs_work;
+
+	/* Total stall times and sampled pressure averages */
+	u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
+	unsigned long avg[NR_PSI_STATES - 1][3];
+
+	/* Monitor work control */
+	atomic_t poll_scheduled;
+	struct kthread_worker __rcu *poll_kworker;
+	struct kthread_delayed_work poll_work;
+
+	/* Protects data used by the monitor */
+	struct mutex trigger_lock;
+
+	/* Configured polling triggers */
+	struct list_head triggers;
+	u32 nr_triggers[NR_PSI_STATES - 1];
+	u32 poll_states;
+	u64 poll_min_period;
+
+	/* Total stall times at the start of monitor activation */
+	u64 polling_total[NR_PSI_STATES - 1];
+	u64 polling_next_update;
+	u64 polling_until;
+};
+
+#else /* CONFIG_PSI */
+
+struct psi_group { };
+
+#endif /* CONFIG_PSI */
+
+#endif /* _LINUX_PSI_TYPES_H */
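
struct psi_trigger backs the userspace monitor: writing a threshold string to a /proc/pressure/ file arms a trigger, and poll() wakes once stall time grows past the threshold within the window. A sketch, assuming the trigger format "some <stall us> <window us>" from psi.txt:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Arm a trigger for 150ms of memory stall per 1s window. */
	const char trig[] = "some 150000 1000000";
	struct pollfd pfd;

	pfd.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (pfd.fd < 0)
		return 1;
	if (write(pfd.fd, trig, strlen(trig) + 1) < 0)
		return 1;
	pfd.events = POLLPRI;
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
		puts("memory pressure threshold crossed");
	close(pfd.fd);
	return 0;
}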
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index d53a231..58ae371 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -60,14 +60,17 @@
 #define PTRACE_MODE_READ	0x01
 #define PTRACE_MODE_ATTACH	0x02
 #define PTRACE_MODE_NOAUDIT	0x04
-#define PTRACE_MODE_FSCREDS 0x08
-#define PTRACE_MODE_REALCREDS 0x10
+#define PTRACE_MODE_FSCREDS	0x08
+#define PTRACE_MODE_REALCREDS	0x10
+#define PTRACE_MODE_SCHED	0x20
+#define PTRACE_MODE_IBPB	0x40
 
 /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
 #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
 #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
 #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
 #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
 
 /**
  * ptrace_may_access - check whether the caller is permitted to access
@@ -85,6 +88,20 @@
  */
 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
 
+/**
+ * ptrace_may_access_sched - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * Similar to ptrace_may_access(). Only to be called from context switch
+ * code. Does not call into audit and the regular LSM hooks due to locking
+ * constraints.
+ */
+extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
+
 static inline int ptrace_reparented(struct task_struct *child)
 {
 	return !same_thread_group(child->real_parent, child->parent);
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 6224a0a..2720b2f 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -118,5 +118,19 @@
 		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
 		pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
 
+/**
+ * hlist_nulls_for_each_entry_safe -
+ *   iterate over list of given type safe against removal of list entry
+ * @tpos:	the type * to use as a loop cursor.
+ * @pos:	the &struct hlist_nulls_node to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_nulls_node within the struct.
+ */
+#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member)		\
+	for (({barrier();}),							\
+	     pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));		\
+		(!is_a_nulls(pos)) &&						\
+		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member);	\
+		   pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
 #endif
 #endif
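
As with the non-nulls list helpers, the _safe variant loads the next pointer before the loop body runs, so the body may unlink and free the current entry. A kernel-context usage sketch (the element type and bucket teardown are invented for the example):

#include <linux/rculist_nulls.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_elem {
	struct hlist_nulls_node node;
	struct rcu_head rcu;
	int key;
};

/* Empty one bucket under its lock; deleting the current entry is fine
 * because @n was already fetched when the body executes. */
static void flush_bucket(struct hlist_nulls_head *head)
{
	struct hlist_nulls_node *n;
	struct my_elem *e;

	hlist_nulls_for_each_entry_safe(e, n, head, node) {
		hlist_nulls_del_rcu(&e->node);
		kfree_rcu(e, rcu);
	}
}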
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 68c1448..2560f87 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -65,7 +65,7 @@
 	struct kref kref;		/* channel refcount */
 	void *private_data;		/* for user-defined data */
 	size_t last_toobig;		/* tried to log event > subbuf size */
-	struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
+	struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */
 	int is_global;			/* One global buffer ? */
 	struct list_head list;		/* for channel list */
 	struct dentry *parent;		/* parent dentry passed to open */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 19d0778..121c8f9 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -125,7 +125,7 @@
 		    unsigned long *lost_events);
 
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
 void ring_buffer_read_prepare_sync(void);
 void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3b2f96a..9ffeebc 100755
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -58,7 +58,6 @@
 #include <linux/uidgid.h>
 #include <linux/gfp.h>
 #include <linux/magic.h>
-#include <linux/cgroup-defs.h>
 
 #include <asm/processor.h>
 
@@ -139,31 +138,6 @@
 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
 #define VMACACHE_MASK (VMACACHE_SIZE - 1)
 
-/*
- * These are the constant used to fake the fixed-point load-average
- * counting. Some notes:
- *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
- *    a load-average precision of 10 bits integer + 11 bits fractional
- *  - if you want to count load-averages more often, you need more
- *    precision, or rounding will get you. With 2-second counting freq,
- *    the EXP_n values would be 1981, 2034 and 2043 if still using only
- *    11 bit fractions.
- */
-extern unsigned long avenrun[];		/* Load averages */
-extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
-
-#define FSHIFT		11		/* nr of bits of precision */
-#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
-#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
-#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
-#define EXP_5		2014		/* 1/exp(5sec/5min) */
-#define EXP_15		2037		/* 1/exp(5sec/15min) */
-
-#define CALC_LOAD(load,exp,n) \
-	load *= exp; \
-	load += n*(FIXED_1-exp); \
-	load >>= FSHIFT;
-
 extern unsigned long total_forks;
 extern int nr_threads;
 DECLARE_PER_CPU(unsigned long, process_counts);
@@ -1020,39 +994,7 @@
 };
 #endif /* CONFIG_SCHED_INFO */
 
-#ifdef CONFIG_TASK_DELAY_ACCT
-struct task_delay_info {
-	spinlock_t	lock;
-	unsigned int	flags;	/* Private per-task flags */
-
-	/* For each stat XXX, add following, aligned appropriately
-	 *
-	 * struct timespec XXX_start, XXX_end;
-	 * u64 XXX_delay;
-	 * u32 XXX_count;
-	 *
-	 * Atomicity of updates to XXX_delay, XXX_count protected by
-	 * single lock above (split into XXX_lock if contention is an issue).
-	 */
-
-	/*
-	 * XXX_count is incremented on every XXX operation, the delay
-	 * associated with the operation is added to XXX_delay.
-	 * XXX_delay contains the accumulated delay time in nanoseconds.
-	 */
-	u64 blkio_start;	/* Shared by blkio, swapin */
-	u64 blkio_delay;	/* wait for sync block io completion */
-	u64 swapin_delay;	/* wait for swapin block io completion */
-	u32 blkio_count;	/* total count of the number of sync block */
-				/* io operations performed */
-	u32 swapin_count;	/* total count of the number of swapin block */
-				/* io operations performed */
-
-	u64 freepages_start;
-	u64 freepages_delay;	/* wait for memory reclaim */
-	u32 freepages_count;	/* total count of memory reclaim */
-};
-#endif	/* CONFIG_TASK_DELAY_ACCT */
+struct task_delay_info;
 
 static inline int sched_info_on(void)
 {
@@ -1850,6 +1792,10 @@
 	unsigned sched_contributes_to_load:1;
 	unsigned sched_migrated:1;
 	unsigned sched_remote_wakeup:1;
+#ifdef CONFIG_PSI
+	unsigned			sched_psi_wake_requeue:1;
+#endif
+
 	unsigned :0; /* force alignment to the next boundary */
 
 	/* unserialized, strictly 'current' */
@@ -2067,6 +2013,10 @@
 	unsigned long ptrace_message;
 	siginfo_t *last_siginfo; /* For ptrace use.  */
 	struct task_io_accounting ioac;
+#ifdef CONFIG_PSI
+	/* Pressure stall state */
+	unsigned int			psi_flags;
+#endif
 #if defined(CONFIG_TASK_XACCT)
 	u64 acct_rss_mem1;	/* accumulated rss usage */
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
@@ -2160,9 +2110,10 @@
 
 	struct page_frag task_frag;
 
-#ifdef	CONFIG_TASK_DELAY_ACCT
-	struct task_delay_info *delays;
+#ifdef CONFIG_TASK_DELAY_ACCT
+	struct task_delay_info		*delays;
 #endif
+
 #ifdef CONFIG_FAULT_INJECTION
 	int make_it_fail;
 #endif
@@ -2573,6 +2524,7 @@
 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
+#define PF_MEMSTALL	0x01000000	/* Stalled due to lack of memory */
 #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
@@ -2633,6 +2585,8 @@
 #define PFA_LMK_WAITING  3      /* Lowmemorykiller is waiting */
 #define PFA_SPEC_SSB_DISABLE		4	/* Speculative Store Bypass disabled */
 #define PFA_SPEC_SSB_FORCE_DISABLE	5	/* Speculative Store Bypass force disabled*/
+#define PFA_SPEC_IB_DISABLE		6	/* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE	7	/* Indirect branch speculation permanently restricted */
 
 
 #define TASK_PFA_TEST(name, func)					\
@@ -2666,6 +2620,13 @@
 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
 
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
 /*
  * task->jobctl flags
  */
@@ -3491,11 +3452,7 @@
  * subsystems needing threadgroup stability can hook into for
  * synchronization.
  */
-static inline void threadgroup_change_begin(struct task_struct *tsk)
-{
-	might_sleep();
-	cgroup_threadgroup_change_begin(tsk);
-}
+extern void threadgroup_change_begin(struct task_struct *tsk);
 
 /**
  * threadgroup_change_end - mark the end of changes to a threadgroup
@@ -3503,10 +3460,7 @@
  *
  * See threadgroup_change_begin().
  */
-static inline void threadgroup_change_end(struct task_struct *tsk)
-{
-	cgroup_threadgroup_change_end(tsk);
-}
+extern void threadgroup_change_end(struct task_struct *tsk);
 
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 
diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h
new file mode 100644
index 0000000..208bbf2
--- /dev/null
+++ b/include/linux/sched/loadavg.h
@@ -0,0 +1,47 @@
+#ifndef _LINUX_SCHED_LOADAVG_H
+#define _LINUX_SCHED_LOADAVG_H
+
+/*
+ * These are the constants used to fake the fixed-point load-average
+ * counting. Some notes:
+ *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ *    a load-average precision of 10 bits integer + 11 bits fractional
+ *  - if you want to count load-averages more often, you need more
+ *    precision, or rounding will get you. With 2-second counting freq,
+ *    the EXP_n values would be 1981, 2034 and 2043 if still using only
+ *    11 bit fractions.
+ */
+extern unsigned long avenrun[];		/* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
+
+#define FSHIFT		11		/* nr of bits of precision */
+#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
+#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
+#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5		2014		/* 1/exp(5sec/5min) */
+#define EXP_15		2037		/* 1/exp(5sec/15min) */
+
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
+static inline unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+	unsigned long newload;
+
+	newload = load * exp + active * (FIXED_1 - exp);
+	if (active >= load)
+		newload += FIXED_1-1;
+
+	return newload / FIXED_1;
+}
+
+extern unsigned long calc_load_n(unsigned long load, unsigned long exp,
+				 unsigned long active, unsigned int n);
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+
+extern void calc_global_load(unsigned long ticks);
+
+#endif /* _LINUX_SCHED_LOADAVG_H */
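
calc_load() is the entire algorithm: once per LOAD_FREQ tick it decays the old average by EXP_n/FIXED_1 and blends in the current number of active tasks. A userspace copy with the same constants, showing the first 5-second step for two runnable tasks against an idle history:

#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1 << FSHIFT)
#define EXP_1	1884	/* 1/exp(5sec/1min) as fixed-point */

static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	unsigned long newload = load * exp + active * (FIXED_1 - exp);

	if (active >= load)
		newload += FIXED_1 - 1;	/* round up while rising */
	return newload / FIXED_1;
}

int main(void)
{
	unsigned long avenrun = 0;		/* idle history */
	unsigned long active = 2 * FIXED_1;	/* 2 runnable tasks */

	avenrun = calc_load(avenrun, EXP_1, active);
	printf("load1 = %lu.%02lu\n", avenrun >> FSHIFT,
	       ((avenrun & (FIXED_1 - 1)) * 100) >> FSHIFT);
	/* first step prints 0.16, converging toward 2.00 */
	return 0;
}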
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 0000000..559ac45
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/atomic.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern atomic_t sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+	return atomic_read(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+void arch_smt_update(void);
+
+#endif
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
new file mode 100644
index 0000000..fa7a6b9
--- /dev/null
+++ b/include/linux/siphash.h
@@ -0,0 +1,140 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#ifndef _LINUX_SIPHASH_H
+#define _LINUX_SIPHASH_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define SIPHASH_ALIGNMENT __alignof__(u64)
+typedef struct {
+	u64 key[2];
+} siphash_key_t;
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
+#endif
+
+u64 siphash_1u64(const u64 a, const siphash_key_t *key);
+u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
+u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
+		 const siphash_key_t *key);
+u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
+		 const siphash_key_t *key);
+u64 siphash_1u32(const u32 a, const siphash_key_t *key);
+u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
+		 const siphash_key_t *key);
+
+static inline u64 siphash_2u32(const u32 a, const u32 b,
+			       const siphash_key_t *key)
+{
+	return siphash_1u64((u64)b << 32 | a, key);
+}
+static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
+			       const u32 d, const siphash_key_t *key)
+{
+	return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
+}
+
+
+static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
+				     const siphash_key_t *key)
+{
+	if (__builtin_constant_p(len) && len == 4)
+		return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
+	if (__builtin_constant_p(len) && len == 8)
+		return siphash_1u64(le64_to_cpu(data[0]), key);
+	if (__builtin_constant_p(len) && len == 16)
+		return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+				    key);
+	if (__builtin_constant_p(len) && len == 24)
+		return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+				    le64_to_cpu(data[2]), key);
+	if (__builtin_constant_p(len) && len == 32)
+		return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+				    le64_to_cpu(data[2]), le64_to_cpu(data[3]),
+				    key);
+	return __siphash_aligned(data, len, key);
+}
+
+/**
+ * siphash - compute 64-bit siphash PRF value
+ * @data: buffer to hash
+ * @len: size of @data
+ * @key: the siphash key
+ */
+static inline u64 siphash(const void *data, size_t len,
+			  const siphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+		return __siphash_unaligned(data, len, key);
+#endif
+	return ___siphash_aligned(data, len, key);
+}
+
+#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
+typedef struct {
+	unsigned long key[2];
+} hsiphash_key_t;
+
+u32 __hsiphash_aligned(const void *data, size_t len,
+		       const hsiphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+			 const hsiphash_key_t *key);
+#endif
+
+u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
+u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
+u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
+		  const hsiphash_key_t *key);
+u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
+		  const hsiphash_key_t *key);
+
+static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
+				      const hsiphash_key_t *key)
+{
+	if (__builtin_constant_p(len) && len == 4)
+		return hsiphash_1u32(le32_to_cpu(data[0]), key);
+	if (__builtin_constant_p(len) && len == 8)
+		return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+				     key);
+	if (__builtin_constant_p(len) && len == 12)
+		return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+				     le32_to_cpu(data[2]), key);
+	if (__builtin_constant_p(len) && len == 16)
+		return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+				     le32_to_cpu(data[2]), le32_to_cpu(data[3]),
+				     key);
+	return __hsiphash_aligned(data, len, key);
+}
+
+/**
+ * hsiphash - compute 32-bit hsiphash PRF value
+ * @data: buffer to hash
+ * @len: size of @data
+ * @key: the hsiphash key
+ */
+static inline u32 hsiphash(const void *data, size_t len,
+			   const hsiphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+		return __hsiphash_unaligned(data, len, key);
+#endif
+	return ___hsiphash_aligned(data, len, key);
+}
+
+#endif /* _LINUX_SIPHASH_H */
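
Typical use is as a keyed hashtable hash: generate the key once from the CSPRNG, then feed attacker-controlled input through siphash(). A kernel-context sketch (the hashtable wrapper is invented; siphash() and get_random_bytes() are the real interfaces):

#include <linux/cache.h>
#include <linux/random.h>
#include <linux/siphash.h>

static siphash_key_t ht_key __read_mostly;

static void ht_init_key(void)
{
	get_random_bytes(&ht_key, sizeof(ht_key));	/* once, at init */
}

/* Fold the 64-bit PRF output into a bucket index (nbuckets must be a
 * power of two).  Without the key, collisions cannot be forced. */
static u32 ht_bucket(const void *data, size_t len, u32 nbuckets)
{
	return (u32)siphash(data, len, &ht_key) & (nbuckets - 1);
}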
diff --git a/include/linux/string.h b/include/linux/string.h
index d55f6ee..c890bdf 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -111,6 +111,9 @@
 #ifndef __HAVE_ARCH_MEMCMP
 extern int memcmp(const void *,const void *,__kernel_size_t);
 #endif
+#ifndef __HAVE_ARCH_BCMP
+extern int bcmp(const void *,const void *,__kernel_size_t);
+#endif
 #ifndef __HAVE_ARCH_MEMCHR
 extern void * memchr(const void *,int,__kernel_size_t);
 #endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7b488ec..5c15a01 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -135,9 +135,9 @@
 /*
  * Max bad pages in the new format..
  */
-#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
 #define MAX_SWAP_BADPAGES \
-	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
+	((offsetof(union swap_header, magic.magic) - \
+	  offsetof(union swap_header, info.badpages)) / sizeof(int))
 
 enum {
 	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
@@ -249,7 +249,7 @@
 
 /* linux/mm/workingset.c */
 void *workingset_eviction(struct address_space *mapping, struct page *page);
-bool workingset_refault(void *shadow);
+void workingset_refault(struct page *page, void *shadow);
 void workingset_activation(struct page *page);
 extern struct list_lru workingset_shadow_nodes;
 
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 47d29d8..16272f1 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -129,7 +129,6 @@
  * @dev: driver model's view of this device
  * @usb_dev: if an interface is bound to the USB major, this will point
  *	to the sysfs representation for that device.
- * @pm_usage_cnt: PM usage counter for this interface
  * @reset_ws: Used for scheduling resets from atomic context.
  * @resetting_device: USB core reset the device, so use alt setting 0 as
  *	current; needs bandwidth alloc after reset.
@@ -186,7 +185,6 @@
 
 	struct device dev;		/* interface specific device info */
 	struct device *usb_dev;
-	atomic_t pm_usage_cnt;		/* usage counter for autosuspend */
 	struct work_struct reset_ws;	/* for resets in atomic context */
 };
 #define	to_usb_interface(d) container_of(d, struct usb_interface, dev)
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index e8d3693..b38c187 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -62,7 +62,7 @@
 /*
  * Creates a virtqueue and allocates the descriptor ring.  If
  * may_reduce_num is set, then this may allocate a smaller ring than
- * expected.  The caller should query virtqueue_get_ring_size to learn
+ * expected.  The caller should query virtqueue_get_vring_size to learn
  * the actual size of the ring.
  */
 struct virtqueue *vring_create_virtqueue(unsigned int index,
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 8e880f7..b95c511 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -359,6 +359,8 @@
 extern struct workqueue_struct *system_power_efficient_wq;
 extern struct workqueue_struct *system_freezable_power_efficient_wq;
 
+extern bool wq_online;
+
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
@@ -598,7 +600,7 @@
  */
 static inline bool keventd_up(void)
 {
-	return system_wq != NULL;
+	return wq_online;
 }
 
 #ifndef CONFIG_SMP
@@ -635,4 +637,7 @@
 int workqueue_offline_cpu(unsigned int cpu);
 #endif
 
+int __init workqueue_init_early(void);
+int __init workqueue_init(void);
+
 #endif
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 3eed4f1..6341e78 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -363,7 +363,6 @@
 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
 
 void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
-void page_writeback_init(void);
 void balance_dirty_pages_ratelimited(struct address_space *mapping);
 bool wb_over_bg_thresh(struct bdi_writeback *wb);
 
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 4931787..57a7dba 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -176,6 +176,9 @@
 
 #define HCI_MAX_SHORT_NAME_LENGTH	10
 
+/* Min encryption key size to match with SMP */
+#define HCI_MIN_ENC_KEY_SIZE		7
+
 /* Default LE RPA expiry time, 15 minutes */
 #define HCI_DEFAULT_RPA_TIMEOUT		(15 * 60)
 
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index fe328c5..801489bb 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -32,6 +32,33 @@
  */
 int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
 
+static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
+{
+	u8 tmp;
+
+	cfpkt_extr_head(pkt, &tmp, 1);
+
+	return tmp;
+}
+
+static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
+{
+	__le16 tmp;
+
+	cfpkt_extr_head(pkt, &tmp, 2);
+
+	return le16_to_cpu(tmp);
+}
+
+static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
+{
+	__le32 tmp;
+
+	cfpkt_extr_head(pkt, &tmp, 4);
+
+	return le32_to_cpu(tmp);
+}
+
 /*
  * Peek header from packet.
  * Reads data from packet without changing packet.
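
The new typed extractors bundle the common extract-then-convert step; per the le16_to_cpu()/le32_to_cpu() calls, these CAIF fields are little-endian on the wire. A userspace sketch of the u16 case, with le16toh() standing in for le16_to_cpu():

#include <endian.h>	/* le16toh() stands in for le16_to_cpu() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t extr_head_u16(const uint8_t **head)
{
	uint16_t le;

	memcpy(&le, *head, 2);	/* like cfpkt_extr_head(pkt, &tmp, 2) */
	*head += 2;
	return le16toh(le);
}

int main(void)
{
	const uint8_t pkt[] = { 0x34, 0x12 };	/* little-endian 0x1234 */
	const uint8_t *head = pkt;

	printf("0x%04x\n", extr_head_u16(&head));	/* prints 0x1234 */
	return 0;
}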
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index 95f33ee..6db0e85 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -18,22 +18,36 @@
 {
 	struct gro_cell *cell;
 	struct net_device *dev = skb->dev;
+	int res;
 
-	if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO))
-		return netif_rx(skb);
+	rcu_read_lock();
+	if (unlikely(!(dev->flags & IFF_UP)))
+		goto drop;
+
+	if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
+		res = netif_rx(skb);
+		goto unlock;
+	}
 
 	cell = this_cpu_ptr(gcells->cells);
 
 	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+drop:
 		atomic_long_inc(&dev->rx_dropped);
 		kfree_skb(skb);
-		return NET_RX_DROP;
+		res = NET_RX_DROP;
+		goto unlock;
 	}
 
 	__skb_queue_tail(&cell->napi_skbs, skb);
 	if (skb_queue_len(&cell->napi_skbs) == 1)
 		napi_schedule(&cell->napi);
-	return NET_RX_SUCCESS;
+
+	res = NET_RX_SUCCESS;
+
+unlock:
+	rcu_read_unlock();
+	return res;
 }
 
 /* called under BH context */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 197a30d..146054c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -289,11 +289,6 @@
 	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
-{
-	return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
-}
-
 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
 	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index a3812e9..c2c724a 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -76,8 +76,8 @@
 	struct timer_list	timer;
 	spinlock_t		lock;
 	atomic_t		refcnt;
-	struct sk_buff		*fragments;  /* Used in IPv6. */
-	struct rb_root		rb_fragments; /* Used in IPv4. */
+	struct sk_buff		*fragments;  /* Used in 6lowpan IPv6. */
+	struct rb_root		rb_fragments; /* Used in IPv4/IPv6. */
 	struct sk_buff		*fragments_tail;
 	struct sk_buff		*last_run_head;
 	ktime_t			stamp;
@@ -152,4 +152,16 @@
 
 extern const u8 ip_frag_ecn_table[16];
 
+/* Return values of inet_frag_queue_insert() */
+#define IPFRAG_OK	0
+#define IPFRAG_DUP	1
+#define IPFRAG_OVERLAP	2
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+			   int offset, int end);
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+			      struct sk_buff *parent);
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+			    void *reasm_data);
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
+
 #endif
diff --git a/include/net/ip.h b/include/net/ip.h
index 9c31d95..db6e110 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -583,7 +583,7 @@
 			     unsigned char __user *data, int optlen);
 void ip_options_undo(struct ip_options *opt);
 void ip_forward_options(struct sk_buff *skb);
-int ip_options_rcv_srr(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
 
 /*
  *	Functions provided by ip_sockglue.c
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 7cb100d..168009e 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -511,35 +511,6 @@
 }
 #endif
 
-struct inet_frag_queue;
-
-enum ip6_defrag_users {
-	IP6_DEFRAG_LOCAL_DELIVER,
-	IP6_DEFRAG_CONNTRACK_IN,
-	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
-	IP6_DEFRAG_CONNTRACK_OUT,
-	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
-	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
-	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
-};
-
-void ip6_frag_init(struct inet_frag_queue *q, const void *a);
-extern const struct rhashtable_params ip6_rhash_params;
-
-/*
- *	Equivalent of ipv4 struct ip
- */
-struct frag_queue {
-	struct inet_frag_queue	q;
-
-	int			iif;
-	unsigned int		csum;
-	__u16			nhoffset;
-	u8			ecn;
-};
-
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
-
 static inline bool ipv6_addr_any(const struct in6_addr *a)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
new file mode 100644
index 0000000..28aa9b3
--- /dev/null
+++ b/include/net/ipv6_frag.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IPV6_FRAG_H
+#define _IPV6_FRAG_H
+#include <linux/kernel.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/inet_frag.h>
+
+enum ip6_defrag_users {
+	IP6_DEFRAG_LOCAL_DELIVER,
+	IP6_DEFRAG_CONNTRACK_IN,
+	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
+	IP6_DEFRAG_CONNTRACK_OUT,
+	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
+	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
+	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
+};
+
+/*
+ *	Equivalent of ipv4 struct ip
+ */
+struct frag_queue {
+	struct inet_frag_queue	q;
+
+	int			iif;
+	__u16			nhoffset;
+	u8			ecn;
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
+{
+	struct frag_queue *fq = container_of(q, struct frag_queue, q);
+	const struct frag_v6_compare_key *key = a;
+
+	q->key.v6 = *key;
+	fq->ecn = 0;
+}
+
+static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
+{
+	return jhash2(data,
+		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+	const struct inet_frag_queue *fq = data;
+
+	return jhash2((const u32 *)&fq->key.v6,
+		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline int
+ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+	const struct frag_v6_compare_key *key = arg->key;
+	const struct inet_frag_queue *fq = ptr;
+
+	return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static inline void
+ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
+{
+	struct net_device *dev = NULL;
+	struct sk_buff *head;
+
+	rcu_read_lock();
+	spin_lock(&fq->q.lock);
+
+	if (fq->q.flags & INET_FRAG_COMPLETE)
+		goto out;
+
+	inet_frag_kill(&fq->q);
+
+	dev = dev_get_by_index_rcu(net, fq->iif);
+	if (!dev)
+		goto out;
+
+	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+
+	/* Don't send error if the first segment did not arrive. */
+	if (!(fq->q.flags & INET_FRAG_FIRST_IN))
+		goto out;
+
+	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
+	 * pull the head out of the tree in order to be able to
+	 * deal with head->dev.
+	 */
+	head = inet_frag_pull_head(&fq->q);
+	if (!head)
+		goto out;
+
+	head->dev = dev;
+	skb_get(head);
+	spin_unlock(&fq->q.lock);
+
+	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+	kfree_skb(head);
+	goto out_rcu_unlock;
+
+out:
+	spin_unlock(&fq->q.lock);
+out_rcu_unlock:
+	rcu_read_unlock();
+	inet_frag_put(&fq->q);
+}
+#endif
+#endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index c05db6f..0cdafe3 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -53,6 +53,7 @@
 						 */
 	spinlock_t		rules_mod_lock;
 
+	u32			hash_mix;
 	atomic64_t		cookie_gen;
 
 	struct list_head	list;		/* list of network namespaces */
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 0b0c35c..238d1b8 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -48,7 +48,6 @@
 }
 
 struct net_device *setup_pre_routing(struct sk_buff *skb);
-void br_netfilter_enable(void);
 
 #if IS_ENABLED(CONFIG_IPV6)
 int br_validate_ipv6(struct net *net, struct sk_buff *skb);
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 69a6715..a347b2f 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -1,21 +1,10 @@
 #ifndef __NET_NS_HASH_H__
 #define __NET_NS_HASH_H__
 
-#include <asm/cache.h>
-
-struct net;
+#include <net/net_namespace.h>
 
 static inline u32 net_hash_mix(const struct net *net)
 {
-#ifdef CONFIG_NET_NS
-	/*
-	 * shift this right to eliminate bits, that are
-	 * always zeroed
-	 */
-
-	return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
-#else
-	return 0;
-#endif
+	return net->hash_mix;
 }
 #endif
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index b669fe6d..98f31c7 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -63,10 +63,11 @@
 		u8		state_after_reset;	/* reset request */
 		u8		error_code;		/* any response */
 		u8		pep_type;		/* status indication */
-		u8		data[1];
+		u8		data0;			/* anything else */
 	};
+	u8			data[];
 };
-#define other_pep_type		data[1]
+#define other_pep_type		data[0]
 
 static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
 {
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 4a5b9a3..803fc26 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -60,7 +60,7 @@
 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
 					unsigned int offset)
 {
-	struct sctphdr *sh = sctp_hdr(skb);
+	struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
         __le32 ret, old = sh->checksum;
 	const struct skb_checksum_ops ops = {
 		.update  = sctp_csum_update,
diff --git a/include/net/sock.h b/include/net/sock.h
index e7756fe..5375467 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -651,6 +651,12 @@
 		hlist_add_head_rcu(&sk->sk_node, list);
 }
 
+static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
+{
+	sock_hold(sk);
+	hlist_add_tail_rcu(&sk->sk_node, list);
+}
+
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
 	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
diff --git a/include/soc/qcom/boot_stats.h b/include/soc/qcom/boot_stats.h
index c81fc24..227d243 100644
--- a/include/soc/qcom/boot_stats.h
+++ b/include/soc/qcom/boot_stats.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014,2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,7 +11,40 @@
  */
 
 #ifdef CONFIG_MSM_BOOT_STATS
+
+#define TIMER_KHZ 32768
+extern struct boot_stats __iomem *boot_stats;
+
+struct boot_stats {
+	uint32_t bootloader_start;
+	uint32_t bootloader_end;
+	uint32_t bootloader_display;
+	uint32_t bootloader_load_kernel;
+	uint32_t load_kernel_start;
+	uint32_t load_kernel_end;
+#ifdef CONFIG_MSM_BOOT_TIME_MARKER
+	uint32_t bootloader_early_domain_start;
+	uint32_t bootloader_checksum;
+#endif
+};
+
 int boot_stats_init(void);
+int boot_stats_exit(void);
+unsigned long long int msm_timer_get_sclk_ticks(void);
+phys_addr_t msm_timer_get_pa(void);
 #else
 static inline int boot_stats_init(void) { return 0; }
+static inline unsigned long long int msm_timer_get_sclk_ticks(void)
+{
+	return 0;
+}
+static inline phys_addr_t msm_timer_get_pa(void) { return 0; }
+#endif
+
+#ifdef CONFIG_MSM_BOOT_TIME_MARKER
+static inline int boot_marker_enabled(void) { return 1; }
+void place_marker(const char *name);
+#else
+static inline void place_marker(const char *name) { }
+static inline int boot_marker_enabled(void) { return 0; }
 #endif
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 0717cd1..e55353a 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -50,6 +50,7 @@
 	int (*suspend_noirq)(struct device *dev);
 	int (*resume_noirq)(struct device *dev);
 	int (*uevent)(struct device *dev, struct icnss_uevent_data *uevent);
+	int (*set_therm_state)(struct device *dev, unsigned long thermal_state);
 };
 
 
@@ -146,4 +147,8 @@
 extern int icnss_trigger_recovery(struct device *dev);
 extern void icnss_block_shutdown(bool status);
 extern bool icnss_is_pdr(void);
+extern int icnss_thermal_register(struct device *dev, unsigned long max_state);
+extern void icnss_thermal_unregister(struct device *dev);
+extern int icnss_get_curr_therm_state(struct device *dev,
+					unsigned long *thermal_state);
 #endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 862153e..f6fc1e0 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -138,6 +138,8 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda439")
 #define early_machine_is_sda429()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda429")
+#define early_machine_is_sdm429w()       \
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm429w")
 #define early_machine_is_mdm9650()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9650")
 #define early_machine_is_qm215()	\
@@ -199,6 +201,7 @@
 #define early_machine_is_sdm429()	0
 #define early_machine_is_sda439()	0
 #define early_machine_is_sda429()	0
+#define early_machine_is_sdm429w()      0
 #define early_machine_is_mdm9650()     0
 #define early_machine_is_qm215()	0
 #define early_machine_is_sdm712()	0
@@ -283,6 +286,7 @@
 	MSM_CPU_SDM429,
 	MSM_CPU_SDA439,
 	MSM_CPU_SDA429,
+	MSM_CPU_SDM429W,
 	MSM_CPU_9650,
 	MSM_CPU_QM215,
 };
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index e88465a..cea051b 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -142,6 +142,17 @@
 		{ CP_SPEC_LOG_NUM,	"log type is 2" },		\
 		{ CP_RECOVER_DIR,	"dir needs recovery" })
 
+#define show_shutdown_mode(type)					\
+	__print_symbolic(type,						\
+		{ F2FS_GOING_DOWN_FULLSYNC,	"full sync" },		\
+		{ F2FS_GOING_DOWN_METASYNC,	"meta sync" },		\
+		{ F2FS_GOING_DOWN_NOSYNC,	"no sync" },		\
+		{ F2FS_GOING_DOWN_METAFLUSH,	"meta flush" },		\
+		{ F2FS_GOING_DOWN_NEED_FSCK,	"need fsck" })
+
+struct f2fs_sb_info;
+struct f2fs_io_info;
+struct extent_info;
 struct victim_sel_policy;
 struct f2fs_map_blocks;
 
@@ -515,6 +526,37 @@
 		__entry->err)
 );
 
+TRACE_EVENT(f2fs_file_write_iter,
+
+	TP_PROTO(struct inode *inode, unsigned long offset,
+		unsigned long length, int ret),
+
+	TP_ARGS(inode, offset, length, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(unsigned long, offset)
+		__field(unsigned long, length)
+		__field(int,	ret)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->offset	= offset;
+		__entry->length	= length;
+		__entry->ret	= ret;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, "
+		"offset = %lu, length = %lu, written(err) = %d",
+		show_dev_ino(__entry),
+		__entry->offset,
+		__entry->length,
+		__entry->ret)
+);
+
 TRACE_EVENT(f2fs_map_blocks,
 	TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
 
@@ -526,6 +568,9 @@
 		__field(block_t,	m_lblk)
 		__field(block_t,	m_pblk)
 		__field(unsigned int,	m_len)
+		__field(unsigned int,	m_flags)
+		__field(int,	m_seg_type)
+		__field(bool,	m_may_create)
 		__field(int,	ret)
 	),
 
@@ -535,15 +580,22 @@
 		__entry->m_lblk		= map->m_lblk;
 		__entry->m_pblk		= map->m_pblk;
 		__entry->m_len		= map->m_len;
+		__entry->m_flags	= map->m_flags;
+		__entry->m_seg_type	= map->m_seg_type;
+		__entry->m_may_create	= map->m_may_create;
 		__entry->ret		= ret;
 	),
 
 	TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
-		"start blkaddr = 0x%llx, len = 0x%llx, err = %d",
+		"start blkaddr = 0x%llx, len = 0x%llx, flags = %u,"
+		"seg_type = %d, may_create = %d, err = %d",
 		show_dev_ino(__entry),
 		(unsigned long long)__entry->m_lblk,
 		(unsigned long long)__entry->m_pblk,
 		(unsigned long long)__entry->m_len,
+		__entry->m_flags,
+		__entry->m_seg_type,
+		__entry->m_may_create,
 		__entry->ret)
 );
 
@@ -1225,6 +1277,32 @@
 	TP_ARGS(page, type)
 );
 
+TRACE_EVENT(f2fs_filemap_fault,
+
+	TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
+
+	TP_ARGS(inode, index, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(pgoff_t, index)
+		__field(unsigned long, ret)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->index	= index;
+		__entry->ret	= ret;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx",
+		show_dev_ino(__entry),
+		(unsigned long)__entry->index,
+		__entry->ret)
+);
+
 TRACE_EVENT(f2fs_writepages,
 
 	TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type),
@@ -1609,6 +1687,30 @@
 	TP_ARGS(sb, type, count)
 );
 
+TRACE_EVENT(f2fs_shutdown,
+
+	TP_PROTO(struct f2fs_sb_info *sbi, unsigned int mode, int ret),
+
+	TP_ARGS(sbi, mode, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(unsigned int, mode)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__entry->dev = sbi->sb->s_dev;
+		__entry->mode = mode;
+		__entry->ret = ret;
+	),
+
+	TP_printk("dev = (%d,%d), mode: %s, ret:%d",
+		show_dev(__entry->dev),
+		show_shutdown_mode(__entry->mode),
+		__entry->ret)
+);
+
 #endif /* _TRACE_F2FS_H */
 
  /* This part must be outside protection */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 5a81ab4..61d41f2 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -81,12 +81,14 @@
 
 #define __def_pageflag_names						\
 	{1UL << PG_locked,		"locked"	},		\
+	{1UL << PG_waiters,		"waiters"	},		\
 	{1UL << PG_error,		"error"		},		\
 	{1UL << PG_referenced,		"referenced"	},		\
 	{1UL << PG_uptodate,		"uptodate"	},		\
 	{1UL << PG_dirty,		"dirty"		},		\
 	{1UL << PG_lru,			"lru"		},		\
 	{1UL << PG_active,		"active"	},		\
+	{1UL << PG_workingset,		"workingset"	},		\
 	{1UL << PG_slab,		"slab"		},		\
 	{1UL << PG_owner_priv_1,	"owner_priv_1"	},		\
 	{1UL << PG_arch_1,		"arch_1"	},		\
diff --git a/include/trace/events/msm_cam.h b/include/trace/events/msm_cam.h
index 3fc1a29..b0dc7ea 100644
--- a/include/trace/events/msm_cam.h
+++ b/include/trace/events/msm_cam.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016, 2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -131,6 +131,34 @@
 	)
 );
 
+TRACE_EVENT(msm_cam_isp_status_dump,
+	TP_PROTO(char *event, uint32_t vfe_id, uint32_t frame_id,
+		uint32_t irq_status0, uint32_t irq_status1),
+	TP_ARGS(event, vfe_id, frame_id, irq_status0,
+		irq_status1),
+	TP_STRUCT__entry(
+		__field(char *, event)
+		__field(unsigned int, vfe_id)
+		__field(unsigned int, frame_id)
+		__field(unsigned int, irq_status0)
+		__field(unsigned int, irq_status1)
+	),
+	TP_fast_assign(
+		__entry->event = event;
+		__entry->vfe_id = vfe_id;
+		__entry->frame_id = frame_id;
+		__entry->irq_status0 = irq_status0;
+		__entry->irq_status1 = irq_status1;
+	),
+	TP_printk("%s vfe %d, frame %d, irq_st0 %x, irq_st1 %x\n",
+		__entry->event,
+		__entry->vfe_id,
+		__entry->frame_id,
+		__entry->irq_status0,
+		__entry->irq_status1
+	)
+);
+
 #endif /* _MSM_CAM_TRACE_H */
 /* This part must be outside protection */
 #include <trace/define_trace.h>
diff --git a/include/uapi/linux/keychord.h b/include/uapi/linux/keychord.h
deleted file mode 100644
index ea7cf4d..0000000
--- a/include/uapi/linux/keychord.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *  Key chord input driver
- *
- * Copyright (C) 2008 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
-*/
-
-#ifndef _UAPI_LINUX_KEYCHORD_H_
-#define _UAPI_LINUX_KEYCHORD_H_
-
-#include <linux/input.h>
-
-#define KEYCHORD_VERSION		1
-
-/*
- * One or more input_keychord structs are written to /dev/keychord
- * at once to specify the list of keychords to monitor.
- * Reading /dev/keychord returns the id of a keychord when the
- * keychord combination is pressed.  A keychord is signalled when
- * all of the keys in the keycode list are in the pressed state.
- * The order in which the keys are pressed does not matter.
- * The keychord will not be signalled if keys not in the keycode
- * list are pressed.
- * Keychords will not be signalled on key release events.
- */
-struct input_keychord {
-	/* should be KEYCHORD_VERSION */
-	__u16 version;
-	/*
-	 * client specified ID, returned from read()
-	 * when this keychord is pressed.
-	 */
-	__u16 id;
-
-	/* number of keycodes in this keychord */
-	__u16 count;
-
-	/* variable length array of keycodes */
-	__u16 keycodes[];
-};
-
-#endif	/* _UAPI_LINUX_KEYCHORD_H_ */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index f0320a0..df1faf7 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -208,6 +208,7 @@
 #define PR_SET_SPECULATION_CTRL		53
 /* Speculation control variants */
 # define PR_SPEC_STORE_BYPASS		0
+# define PR_SPEC_INDIRECT_BRANCH	1
 /* Return and control values for PR_SET/GET_SPECULATION_CTRL */
 # define PR_SPEC_NOT_AFFECTED		0
 # define PR_SPEC_PRCTL			(1UL << 0)
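
PR_SPEC_INDIRECT_BRANCH extends the speculation-control prctl so a task can restrict its own indirect branch speculation. A userspace sketch; the fallback defines mirror the full uapi header (PR_SPEC_DISABLE is (1UL << 2) there), which older installed headers may lack:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SPEC_INDIRECT_BRANCH		/* older installed headers */
# define PR_GET_SPECULATION_CTRL	52
# define PR_SET_SPECULATION_CTRL	53
# define PR_SPEC_INDIRECT_BRANCH	1
# define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	/* Negative return: the mitigation (or the prctl) is unavailable. */
	long st = prctl(PR_GET_SPECULATION_CTRL,
			PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);

	printf("indirect branch speculation state: %ld\n", st);

	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");
	return 0;
}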
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 2466e55..b48f747 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -33,7 +33,7 @@
  */
 
 
-#define TASKSTATS_VERSION	8
+#define TASKSTATS_VERSION	9
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */
 
@@ -163,6 +163,10 @@
 	/* Delay waiting for memory reclaim */
 	__u64	freepages_count;
 	__u64	freepages_delay_total;
+
+	/* Delay waiting for thrashing page */
+	__u64	thrashing_count;
+	__u64	thrashing_delay_total;
 };
 
 
diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h
index 63abd156..98cddcd 100644
--- a/include/uapi/media/msm_camsensor_sdk.h
+++ b/include/uapi/media/msm_camsensor_sdk.h
@@ -255,6 +255,9 @@
 	MSM_CAM_WRITE = 0,
 	MSM_CAM_POLL,
 	MSM_CAM_READ,
+#define MSM_CAM_READ_LOOP \
+	MSM_CAM_READ_LOOP
+	MSM_CAM_READ_LOOP = 3,
 };
 
 struct msm_sensor_i2c_sync_params {
diff --git a/init/Kconfig b/init/Kconfig
index c854ad5..158e2c1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -484,6 +484,45 @@
 
 	  Say N if unsure.
 
+config PSI
+	bool "Pressure stall information tracking"
+	help
+	  Collect metrics that indicate how overcommitted the CPU, memory,
+	  and IO capacity are in the system.
+
+	  If you say Y here, the kernel will create /proc/pressure/ with the
+	  pressure statistics files cpu, memory, and io. These will indicate
+	  the share of walltime in which some or all tasks in the system are
+	  delayed due to contention of the respective resource.
+
+	  In kernels with cgroup support, cgroups (cgroup2 only) will
+	  have cpu.pressure, memory.pressure, and io.pressure files,
+	  which aggregate pressure stalls for the grouped tasks only.
+
+	  For more details see Documentation/accounting/psi.txt.
+
+	  Say N if unsure.
+
+config PSI_DEFAULT_DISABLED
+	bool "Require boot parameter to enable pressure stall information tracking"
+	default n
+	depends on PSI
+	help
+	  If set, pressure stall information tracking will be disabled
+	  by default but can be enabled by passing psi=1 on the kernel
+	  command line during boot.
+
+	  This feature adds some code to the task wakeup and sleep
+	  paths of the scheduler. The overhead is too low to affect
+	  common scheduling-intensive workloads in practice (such as
+	  webservers or memcache), but it does show up in artificial
+	  scheduler stress tests, such as hackbench.
+
+	  If you are paranoid and not sure what the kernel will be
+	  used for, say Y.
+
+	  Say N if unsure.
+
 endmenu # "CPU/Task time and stats accounting"
 
 menu "RCU Subsystem"
diff --git a/init/main.c b/init/main.c
index f5670ca..a5c9e57 100644
--- a/init/main.c
+++ b/init/main.c
@@ -515,6 +515,8 @@
 	page_alloc_init();
 
 	pr_notice("Kernel command line: %s\n", boot_command_line);
+	/* parameters may set static keys */
+	jump_label_init();
 	parse_early_param();
 	after_dashes = parse_args("Booting kernel",
 				  static_command_line, __start___param,
@@ -524,8 +526,6 @@
 		parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
 			   NULL, set_init_arg);
 
-	jump_label_init();
-
 	/*
 	 * These use large bootmem allocations and must precede
 	 * kmem_cache_init()
@@ -552,6 +552,14 @@
 		 "Interrupts were enabled *very* early, fixing it\n"))
 		local_irq_disable();
 	idr_init_cache();
+
+	/*
+	 * Allow workqueue creation and work item queueing/cancelling
+	 * early.  Work item execution depends on kthreads and starts after
+	 * workqueue_init().
+	 */
+	workqueue_init_early();
+
 	rcu_init();
 
 	/* trace_printk() and trace points may be used after this */
@@ -637,9 +645,8 @@
 	security_init();
 	dbg_late_init();
 	vfs_caches_init();
+	pagecache_init();
 	signals_init();
-	/* rootfs populating might need page-writeback */
-	page_writeback_init();
 	proc_root_init();
 	nsfs_init();
 	cpuset_init();
@@ -1006,6 +1013,8 @@
 
 	smp_prepare_cpus(setup_max_cpus);
 
+	workqueue_init();
+
 	do_pre_smp_initcalls();
 	lockup_detector_init();
 
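init/main.c now brings workqueues up in two stages: workqueue_init_early() makes creation and queueing legal, and workqueue_init() later supplies the kthreads that actually execute work. A sketch of what this permits for early boot code (the work item here is illustrative):

static void probe_fn(struct work_struct *work)
{
	/* body runs only after workqueue_init() has created workers */
}
static DECLARE_WORK(probe_work, probe_fn);

/* valid any time after workqueue_init_early(): the item is queued
 * immediately and executed once workqueue_init() has run */
schedule_work(&probe_work);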
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 9c86d5d..b6e2bfd 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,12 +13,13 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
+#include <linux/rculist_nulls.h>
 #include "percpu_freelist.h"
 #define HTAB_CREATE_FLAG_MASK						\
 	(BPF_F_NO_PREALLOC | BPF_F_RDONLY | BPF_F_WRONLY)
 
 struct bucket {
-	struct hlist_head head;
+	struct hlist_nulls_head head;
 	raw_spinlock_t lock;
 };
 
@@ -42,9 +43,14 @@
 /* each htab element is struct htab_elem + key + value */
 struct htab_elem {
 	union {
-		struct hlist_node hash_node;
-		struct bpf_htab *htab;
-		struct pcpu_freelist_node fnode;
+		struct hlist_nulls_node hash_node;
+		struct {
+			void *padding;
+			union {
+				struct bpf_htab *htab;
+				struct pcpu_freelist_node fnode;
+			};
+		};
 	};
 	union {
 		struct rcu_head rcu;
@@ -116,8 +122,10 @@
 	if (err)
 		goto free_elems;
 
-	pcpu_freelist_populate(&htab->freelist, htab->elems, htab->elem_size,
-			       htab->map.max_entries);
+	pcpu_freelist_populate(&htab->freelist,
+			       htab->elems + offsetof(struct htab_elem, fnode),
+			       htab->elem_size, htab->map.max_entries);
+
 	return 0;
 
 free_elems:
@@ -150,6 +158,11 @@
 	int err, i;
 	u64 cost;
 
+	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
+		     offsetof(struct htab_elem, hash_node.pprev));
+	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
+		     offsetof(struct htab_elem, hash_node.pprev));
+
 	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
 		/* reserved bits should not be used */
 		return ERR_PTR(-EINVAL);
@@ -235,7 +248,7 @@
 		goto free_htab;
 
 	for (i = 0; i < htab->n_buckets; i++) {
-		INIT_HLIST_HEAD(&htab->buckets[i].head);
+		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 		raw_spin_lock_init(&htab->buckets[i].lock);
 	}
 
@@ -272,28 +285,52 @@
 	return &htab->buckets[hash & (htab->n_buckets - 1)];
 }
 
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
 {
 	return &__select_bucket(htab, hash)->head;
 }
 
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
+/* this lookup function can only be called with bucket lock taken */
+static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
 					 void *key, u32 key_size)
 {
+	struct hlist_nulls_node *n;
 	struct htab_elem *l;
 
-	hlist_for_each_entry_rcu(l, head, hash_node)
+	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 			return l;
 
 	return NULL;
 }
 
+/* can be called without bucket lock. it will repeat the loop in
+ * the unlikely event that elements moved from one bucket into another
+ * while the linked list is being walked
+ */
+static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
+					       u32 hash, void *key,
+					       u32 key_size, u32 n_buckets)
+{
+	struct hlist_nulls_node *n;
+	struct htab_elem *l;
+
+again:
+	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
+		if (l->hash == hash && !memcmp(&l->key, key, key_size))
+			return l;
+
+	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
+		goto again;
+
+	return NULL;
+}
+
 /* Called from syscall or from eBPF program */
 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	struct htab_elem *l;
 	u32 hash, key_size;
 
@@ -306,7 +343,7 @@
 
 	head = select_bucket(htab, hash);
 
-	l = lookup_elem_raw(head, hash, key, key_size);
+	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 
 	return l;
 }
@@ -325,7 +362,7 @@
 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	struct htab_elem *l, *next_l;
 	u32 hash, key_size;
 	int i = 0;
@@ -342,13 +379,13 @@
 	head = select_bucket(htab, hash);
 
 	/* lookup the key */
-	l = lookup_elem_raw(head, hash, key, key_size);
+	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 
 	if (!l)
 		goto find_first_elem;
 
 	/* key was found, get next key in the same bucket */
-	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
+	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
 				  struct htab_elem, hash_node);
 
 	if (next_l) {
@@ -367,7 +404,7 @@
 		head = select_bucket(htab, i);
 
 		/* pick first element in the bucket */
-		next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
 					  struct htab_elem, hash_node);
 		if (next_l) {
 			/* if it's not empty, just return it */
@@ -431,9 +468,13 @@
 	int err = 0;
 
 	if (prealloc) {
-		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
-		if (!l_new)
+		struct pcpu_freelist_node *l;
+
+		l = pcpu_freelist_pop(&htab->freelist);
+		if (!l)
 			err = -E2BIG;
+		else
+			l_new = container_of(l, struct htab_elem, fnode);
 	} else {
 		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 			atomic_dec(&htab->count);
@@ -520,7 +561,7 @@
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l_new = NULL, *l_old;
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	unsigned long flags;
 	struct bucket *b;
 	u32 key_size, hash;
@@ -559,9 +600,9 @@
 	/* add new element to the head of the list, so that
 	 * concurrent search will find it before old elem
 	 */
-	hlist_add_head_rcu(&l_new->hash_node, head);
+	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 	if (l_old) {
-		hlist_del_rcu(&l_old->hash_node);
+		hlist_nulls_del_rcu(&l_old->hash_node);
 		free_htab_elem(htab, l_old);
 	}
 	ret = 0;
@@ -576,7 +617,7 @@
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l_new = NULL, *l_old;
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	unsigned long flags;
 	struct bucket *b;
 	u32 key_size, hash;
@@ -628,7 +669,7 @@
 			ret = PTR_ERR(l_new);
 			goto err;
 		}
-		hlist_add_head_rcu(&l_new->hash_node, head);
+		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 	}
 	ret = 0;
 err:
@@ -646,7 +687,7 @@
 static int htab_map_delete_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	struct bucket *b;
 	struct htab_elem *l;
 	unsigned long flags;
@@ -666,7 +707,7 @@
 	l = lookup_elem_raw(head, hash, key, key_size);
 
 	if (l) {
-		hlist_del_rcu(&l->hash_node);
+		hlist_nulls_del_rcu(&l->hash_node);
 		free_htab_elem(htab, l);
 		ret = 0;
 	}
@@ -680,12 +721,12 @@
 	int i;
 
 	for (i = 0; i < htab->n_buckets; i++) {
-		struct hlist_head *head = select_bucket(htab, i);
-		struct hlist_node *n;
+		struct hlist_nulls_head *head = select_bucket(htab, i);
+		struct hlist_nulls_node *n;
 		struct htab_elem *l;
 
-		hlist_for_each_entry_safe(l, n, head, hash_node) {
-			hlist_del_rcu(&l->hash_node);
+		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
+			hlist_nulls_del_rcu(&l->hash_node);
 			if (l->state != HTAB_EXTRA_ELEM_USED)
 				htab_elem_free(htab, l);
 		}
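The lockless path follows the canonical hlist_nulls reader pattern from rculist_nulls.h: a freed element can be recycled into a different bucket while a reader walks the chain, so the reader validates the nulls value that terminates the walk and restarts on a mismatch. A minimal generic reader (a sketch, assuming a struct obj that embeds a u32 key and a struct hlist_nulls_node node, and a table whose bucket i was seeded with nulls value i; the caller holds rcu_read_lock()):

struct obj *lookup(struct hlist_nulls_head *table, u32 slot, u32 key)
{
	struct hlist_nulls_node *n;
	struct obj *o;

begin:
	hlist_nulls_for_each_entry_rcu(o, n, &table[slot], node)
		if (o->key == key)
			return o;
	/* the terminating nulls value encodes the bucket the chain
	 * belongs to; a mismatch means we migrated mid-walk */
	if (get_nulls_value(n) != slot)
		goto begin;
	return NULL;
}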
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 7e48fa6..002a0b9 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -325,7 +325,7 @@
 static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
 {
 	struct bpf_prog *prog;
-	int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
+	int ret = inode_permission(inode, MAY_READ);
 	if (ret)
 		return ERR_PTR(ret);
 
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a83771f..1953033 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -62,6 +62,7 @@
 #include <linux/proc_ns.h>
 #include <linux/nsproxy.h>
 #include <linux/file.h>
+#include <linux/psi.h>
 #include <net/sock.h>
 
 #define CREATE_TRACE_POINTS
@@ -361,15 +362,6 @@
 	spin_unlock_bh(&cgroup_idr_lock);
 }
 
-static struct cgroup *cgroup_parent(struct cgroup *cgrp)
-{
-	struct cgroup_subsys_state *parent_css = cgrp->self.parent;
-
-	if (parent_css)
-		return container_of(parent_css, struct cgroup, self);
-	return NULL;
-}
-
 /* subsystems visibly enabled on a cgroup */
 static u16 cgroup_control(struct cgroup *cgrp)
 {
@@ -487,17 +479,6 @@
 	return !(cgrp->self.flags & CSS_ONLINE);
 }
 
-static void cgroup_get(struct cgroup *cgrp)
-{
-	WARN_ON_ONCE(cgroup_is_dead(cgrp));
-	css_get(&cgrp->self);
-}
-
-static bool cgroup_tryget(struct cgroup *cgrp)
-{
-	return css_tryget(&cgrp->self);
-}
-
 struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
 {
 	struct cgroup *cgrp = of->kn->parent->priv;
@@ -781,7 +762,7 @@
 		 */
 		WARN_ON_ONCE(task->flags & PF_EXITING);
 
-		rcu_assign_pointer(task->cgroups, to_cset);
+		cgroup_move_task(task, to_cset);
 		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
 							     &to_cset->tasks);
 	}
@@ -3525,6 +3506,96 @@
 	return 0;
 }
 
+#ifdef CONFIG_PSI
+static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
+{
+	return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_IO);
+}
+static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
+{
+	return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_MEM);
+}
+static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
+{
+	return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_CPU);
+}
+
+static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
+					  size_t nbytes, enum psi_res res)
+{
+	struct psi_trigger *new;
+	struct cgroup *cgrp;
+
+	cgrp = cgroup_kn_lock_live(of->kn, false);
+	if (!cgrp)
+		return -ENODEV;
+
+	cgroup_get(cgrp);
+	cgroup_kn_unlock(of->kn);
+
+	new = psi_trigger_create(&cgrp->psi, buf, nbytes, res);
+	if (IS_ERR(new)) {
+		cgroup_put(cgrp);
+		return PTR_ERR(new);
+	}
+
+	psi_trigger_replace(&of->priv, new);
+
+	cgroup_put(cgrp);
+
+	return nbytes;
+}
+
+static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_IO);
+}
+
+static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_MEM);
+}
+
+static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
+					  char *buf, size_t nbytes,
+					  loff_t off)
+{
+	return cgroup_pressure_write(of, buf, nbytes, PSI_CPU);
+}
+
+static unsigned int cgroup_pressure_poll(struct kernfs_open_file *of,
+					 poll_table *pt)
+{
+	return psi_trigger_poll(&of->priv, of->file, pt);
+}
+
+static void cgroup_pressure_release(struct kernfs_open_file *of)
+{
+	psi_trigger_replace(&of->priv, NULL);
+}
+#endif /* CONFIG_PSI */
+
+static int cgroup_file_open(struct kernfs_open_file *of)
+{
+	struct cftype *cft = of->kn->priv;
+
+	if (cft->open)
+		return cft->open(of);
+	return 0;
+}
+
+static void cgroup_file_release(struct kernfs_open_file *of)
+{
+	struct cftype *cft = of->kn->priv;
+
+	if (cft->release)
+		cft->release(of);
+}
+
 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
 				 size_t nbytes, loff_t off)
 {
@@ -3563,6 +3634,16 @@
 	return ret ?: nbytes;
 }
 
+static unsigned int cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
+{
+	struct cftype *cft = of->kn->priv;
+
+	if (cft->poll)
+		return cft->poll(of, pt);
+
+	return kernfs_generic_poll(of, pt);
+}
+
 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
 {
 	return seq_cft(seq)->seq_start(seq, ppos);
@@ -3575,7 +3656,8 @@
 
 static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
 {
-	seq_cft(seq)->seq_stop(seq, v);
+	if (seq_cft(seq)->seq_stop)
+		seq_cft(seq)->seq_stop(seq, v);
 }
 
 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
@@ -3597,13 +3679,19 @@
 
 static struct kernfs_ops cgroup_kf_single_ops = {
 	.atomic_write_len	= PAGE_SIZE,
+	.open			= cgroup_file_open,
+	.release		= cgroup_file_release,
 	.write			= cgroup_file_write,
+	.poll			= cgroup_file_poll,
 	.seq_show		= cgroup_seqfile_show,
 };
 
 static struct kernfs_ops cgroup_kf_ops = {
 	.atomic_write_len	= PAGE_SIZE,
+	.open			= cgroup_file_open,
+	.release		= cgroup_file_release,
 	.write			= cgroup_file_write,
+	.poll			= cgroup_file_poll,
 	.seq_start		= cgroup_seqfile_start,
 	.seq_next		= cgroup_seqfile_next,
 	.seq_stop		= cgroup_seqfile_stop,
@@ -4940,6 +5028,32 @@
 		.file_offset = offsetof(struct cgroup, events_file),
 		.seq_show = cgroup_events_show,
 	},
+#ifdef CONFIG_PSI
+	{
+		.name = "io.pressure",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_io_pressure_show,
+		.write = cgroup_io_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
+	},
+	{
+		.name = "memory.pressure",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_memory_pressure_show,
+		.write = cgroup_memory_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
+	},
+	{
+		.name = "cpu.pressure",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = cgroup_cpu_pressure_show,
+		.write = cgroup_cpu_pressure_write,
+		.poll = cgroup_pressure_poll,
+		.release = cgroup_pressure_release,
+	},
+#endif /* CONFIG_PSI */
 	{ }	/* terminate */
 };
 
@@ -5045,6 +5159,8 @@
 			 */
 			cgroup_put(cgroup_parent(cgrp));
 			kernfs_put(cgrp->kn);
+			if (cgroup_on_dfl(cgrp))
+				psi_cgroup_free(cgrp);
 			kfree(cgrp);
 		} else {
 			/*
@@ -5314,6 +5430,12 @@
 	if (!cgroup_on_dfl(cgrp))
 		cgrp->subtree_control = cgroup_control(cgrp);
 
+	if (cgroup_on_dfl(cgrp)) {
+		ret = psi_cgroup_alloc(cgrp);
+		if (ret)
+			goto out_idr_free;
+	}
+
 	if (parent)
 		cgroup_bpf_inherit(cgrp, parent);
 
@@ -5321,6 +5443,8 @@
 
 	return cgrp;
 
+out_idr_free:
+	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 out_cancel_ref:
 	percpu_ref_exit(&cgrp->self.refcnt);
 out_free_cgrp:
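Together with cgroup_pressure_poll() and cgroup_pressure_release(), this implements the PSI trigger protocol: write a threshold, then poll() for POLLPRI. A userspace sketch against a cgroup2 memory.pressure file (the cgroup path and the 150ms-per-1s threshold are example values; /proc/pressure/memory accepts the same format):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* wake when "some" memory stall exceeds 150ms within any 1s window */
	const char trig[] = "some 150000 1000000";
	struct pollfd fds;

	fds.fd = open("/sys/fs/cgroup/mygroup/memory.pressure",
		      O_RDWR | O_NONBLOCK);
	if (fds.fd < 0 || write(fds.fd, trig, strlen(trig) + 1) < 0)
		return 1;
	fds.events = POLLPRI;

	for (;;) {
		if (poll(&fds, 1, -1) < 0)
			return 1;
		if (fds.revents & POLLERR)
			return 0;	/* monitored file went away */
		if (fds.revents & POLLPRI)
			printf("memory pressure threshold breached\n");
	}
}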
diff --git a/kernel/cpu.c b/kernel/cpu.c
index dd067c0..641fc19 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/notifier.h>
 #include <linux/sched.h>
+#include <linux/sched/smt.h>
 #include <linux/unistd.h>
 #include <linux/cpu.h>
 #include <linux/oom.h>
@@ -362,6 +363,12 @@
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif	/* CONFIG_HOTPLUG_CPU */
 
+/*
+ * Architectures that need SMT-specific errata handling during SMT hotplug
+ * should override this.
+ */
+void __weak arch_smt_update(void) { }
+
 #ifdef CONFIG_HOTPLUG_SMT
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 EXPORT_SYMBOL_GPL(cpu_smt_control);
@@ -599,6 +606,20 @@
 	}
 }
 
+static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+{
+	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		return true;
+	/*
+	 * When CPU hotplug is disabled, taking the CPU down is not
+	 * possible because takedown_cpu() and the architecture- and
+	 * subsystem-specific mechanisms are not available. So the CPU
+	 * which would be completely unplugged again needs to stay around
+	 * in the current state.
+	 */
+	return st->state <= CPUHP_BRINGUP_CPU;
+}
+
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 			      enum cpuhp_state target)
 {
@@ -609,8 +630,10 @@
 		st->state++;
 		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
 		if (ret) {
-			st->target = prev_state;
-			undo_cpu_up(cpu, st);
+			if (can_rollback_cpu(st)) {
+				st->target = prev_state;
+				undo_cpu_up(cpu, st);
+			}
 			break;
 		}
 	}
@@ -1057,6 +1080,7 @@
 	/* This post dead nonsense must die */
 	if (!ret && hasdied)
 		cpu_notify_nofail(CPU_POST_DEAD, cpu);
+	arch_smt_update();
 	return ret;
 }
 
@@ -1180,6 +1204,7 @@
 out:
 	trace_cpuhp_latency(cpu, 1, start_time, ret);
 	cpu_hotplug_done();
+	arch_smt_update();
 	return ret;
 }
 
@@ -2065,8 +2090,10 @@
 		 */
 		cpuhp_offline_cpu_device(cpu);
 	}
-	if (!ret)
+	if (!ret) {
 		cpu_smt_control = ctrlval;
+		arch_smt_update();
+	}
 	cpu_maps_update_done();
 	return ret;
 }
@@ -2077,6 +2104,7 @@
 
 	cpu_maps_update_begin();
 	cpu_smt_control = CPU_SMT_ENABLED;
+	arch_smt_update();
 	for_each_present_cpu(cpu) {
 		/* Skip online CPUs and CPUs on offline nodes */
 		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
@@ -2284,6 +2312,21 @@
 	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
 }
 
+enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
+
+static int __init mitigations_parse_cmdline(char *arg)
+{
+	if (!strcmp(arg, "off"))
+		cpu_mitigations = CPU_MITIGATIONS_OFF;
+	else if (!strcmp(arg, "auto"))
+		cpu_mitigations = CPU_MITIGATIONS_AUTO;
+	else if (!strcmp(arg, "auto,nosmt"))
+		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+
+	return 0;
+}
+early_param("mitigations", mitigations_parse_cmdline);
+
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
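Booting with mitigations=off, mitigations=auto, or mitigations=auto,nosmt selects the policy; cpu_mitigations itself is meant to be read by arch-specific mitigation selection. A sketch of accessors of the kind the upstream form of this change provides (names assumed, not part of this hunk):

/* sketch: helpers arch mitigation code can use */
static inline bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}

static inline bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}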
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 5a58421..12076ff 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -18,6 +18,7 @@
 #include <linux/kmsg_dump.h>
 #include <linux/reboot.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/sysrq.h>
 #include <linux/smp.h>
 #include <linux/utsname.h>
@@ -2584,16 +2585,11 @@
 	}
 	kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);
 
-	/* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */
-
-#define LOAD_INT(x) ((x) >> FSHIFT)
-#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
 	kdb_printf("load avg   %ld.%02ld %ld.%02ld %ld.%02ld\n",
 		LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),
 		LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),
 		LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));
-#undef LOAD_INT
-#undef LOAD_FRAC
+
 	/* Display in kilobytes */
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 	kdb_printf("\nMemTotal:       %8lu kB\nMemFree:        %8lu kB\n"
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 435c14a..3fde7df 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -124,9 +124,12 @@
 	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
 	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
 	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
+	tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
+	d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
 	d->blkio_count += tsk->delays->blkio_count;
 	d->swapin_count += tsk->delays->swapin_count;
 	d->freepages_count += tsk->delays->freepages_count;
+	d->thrashing_count += tsk->delays->thrashing_count;
 	spin_unlock_irqrestore(&tsk->delays->lock, flags);
 
 	return 0;
@@ -156,3 +159,14 @@
 			&current->delays->freepages_count);
 }
 
+void __delayacct_thrashing_start(void)
+{
+	current->delays->thrashing_start = ktime_get_ns();
+}
+
+void __delayacct_thrashing_end(void)
+{
+	delayacct_end(&current->delays->thrashing_start,
+		      &current->delays->thrashing_delay,
+		      &current->delays->thrashing_count);
+}
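These primitives are meant to bracket waits on pages that are thrashing in; a sketch of the call-site pattern via the delayacct_thrashing_start()/delayacct_thrashing_end() wrappers (the actual mm/filemap.c hunk and the PageWorkingset() test come from elsewhere in this series):

	bool thrashing = false;

	if (bit_nr == PG_locked && !PageUptodate(page) &&
	    PageWorkingset(page)) {
		delayacct_thrashing_start();
		thrashing = true;
	}

	/* ... sleep until the page is unlocked ... */

	if (thrashing)
		delayacct_thrashing_end();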
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e8f0970..1dbbc81 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6792,6 +6792,7 @@
 	struct perf_output_handle handle;
 	struct perf_sample_data sample;
 	int size = mmap_event->event_id.header.size;
+	u32 type = mmap_event->event_id.header.type;
 	int ret;
 
 	if (!perf_event_mmap_match(event, data))
@@ -6835,6 +6836,7 @@
 	perf_output_end(&handle);
 out:
 	mmap_event->event_id.header.size = size;
+	mmap_event->event_id.header.type = type;
 }
 
 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
diff --git a/kernel/fork.c b/kernel/fork.c
index f1cd160..b4b4108 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1625,6 +1625,10 @@
 
 	p->default_timer_slack_ns = current->timer_slack_ns;
 
+#ifdef CONFIG_PSI
+	p->psi_flags = 0;
+#endif
+
 	task_io_accounting_init(&p->ioac);
 	acct_clear_integrals(p);
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 30fe043..2e766ff 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -3110,6 +3110,10 @@
 {
 	u32 uval, uninitialized_var(nval), mval;
 
+	/* Futex address must be 32bit aligned */
+	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
+		return -1;
+
 retry:
 	if (get_user(uval, uaddr))
 		return -1;
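Equivalently, the new check could use the kernel's IS_ALIGNED() helper; either spelling rejects a misaligned robust-list entry before the get_user() retry loop can fault on it repeatedly:

	if (!IS_ALIGNED((unsigned long)uaddr, sizeof(*uaddr)))
		return -1;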
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 1276aab..146b789 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -53,23 +53,17 @@
 choice
 	prompt "Specify GCOV format"
 	depends on GCOV_KERNEL
-	default GCOV_FORMAT_AUTODETECT
+	default GCOV_FORMAT_CLANG
 	---help---
-	The gcov format is usually determined by the GCC version, but there are
+	The gcov format is usually determined by the GCC version, and the
+	default is chosen according to your GCC version. However, there are
 	exceptions where format changes are integrated in lower-version GCCs.
-	In such a case use this option to adjust the format used in the kernel
-	accordingly.
-
-	If unsure, choose "Autodetect".
-
-config GCOV_FORMAT_AUTODETECT
-	bool "Autodetect"
-	---help---
-	Select this option to use the format that corresponds to your GCC
-	version.
+	In such a case, change this option to adjust the format used in the
+	kernel accordingly.
 
 config GCOV_FORMAT_3_4
 	bool "GCC 3.4 format"
+	depends on GCC_VERSION < 40700
 	---help---
 	Select this option to use the format defined by GCC 3.4.
 
@@ -78,6 +72,11 @@
 	---help---
 	Select this option to use the format defined by GCC 4.7.
 
+config GCOV_FORMAT_CLANG
+	bool "Clang format"
+	---help---
+	Select this option to use the format defined by Clang.
+
 endchoice
 
 endmenu
diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile
index 752d648..87f91c0 100644
--- a/kernel/gcov/Makefile
+++ b/kernel/gcov/Makefile
@@ -1,7 +1,6 @@
 ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"'
 
 obj-y := base.o fs.o
-obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_3_4.o
-obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_4_7.o
-obj-$(CONFIG_GCOV_FORMAT_AUTODETECT) += $(call cc-ifversion, -lt, 0407, \
-							gcc_3_4.o, gcc_4_7.o)
+obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_base.o gcc_3_4.o
+obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_base.o gcc_4_7.o
+obj-$(CONFIG_GCOV_FORMAT_CLANG) += clang.o
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index c51a49c..d1e7b70 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -21,88 +21,8 @@
 #include <linux/sched.h>
 #include "gcov.h"
 
-static int gcov_events_enabled;
-static DEFINE_MUTEX(gcov_lock);
-
-/*
- * __gcov_init is called by gcc-generated constructor code for each object
- * file compiled with -fprofile-arcs.
- */
-void __gcov_init(struct gcov_info *info)
-{
-	static unsigned int gcov_version;
-
-	mutex_lock(&gcov_lock);
-	if (gcov_version == 0) {
-		gcov_version = gcov_info_version(info);
-		/*
-		 * Printing gcc's version magic may prove useful for debugging
-		 * incompatibility reports.
-		 */
-		pr_info("version magic: 0x%x\n", gcov_version);
-	}
-	/*
-	 * Add new profiling data structure to list and inform event
-	 * listener.
-	 */
-	gcov_info_link(info);
-	if (gcov_events_enabled)
-		gcov_event(GCOV_ADD, info);
-	mutex_unlock(&gcov_lock);
-}
-EXPORT_SYMBOL(__gcov_init);
-
-/*
- * These functions may be referenced by gcc-generated profiling code but serve
- * no function for kernel profiling.
- */
-void __gcov_flush(void)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_flush);
-
-void __gcov_merge_add(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_add);
-
-void __gcov_merge_single(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_single);
-
-void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_delta);
-
-void __gcov_merge_ior(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_ior);
-
-void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_time_profile);
-
-void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_merge_icall_topn);
-
-void __gcov_exit(void)
-{
-	/* Unused. */
-}
-EXPORT_SYMBOL(__gcov_exit);
+int gcov_events_enabled;
+DEFINE_MUTEX(gcov_lock);
 
 /**
  * gcov_enable_events - enable event reporting through gcov_event()
@@ -143,7 +63,7 @@
 
 	/* Remove entries located in module from linked list. */
 	while ((info = gcov_info_next(info))) {
-		if (within_module((unsigned long)info, mod)) {
+		if (gcov_info_within_module(info, mod)) {
 			gcov_info_unlink(prev, info);
 			if (gcov_events_enabled)
 				gcov_event(GCOV_REMOVE, info);
diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c
new file mode 100644
index 0000000..c94b820
--- /dev/null
+++ b/kernel/gcov/clang.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Google, Inc.
+ * modified from kernel/gcov/gcc_4_7.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * LLVM uses profiling data that's deliberately similar to GCC, but has a
+ * very different way of exporting that data.  LLVM calls llvm_gcov_init() once
+ * per module, and provides a couple of callbacks that we can use to ask for
+ * more data.
+ *
+ * We care about the "writeout" callback, which in turn calls back into
+ * compiler-rt/this module to dump all the gathered coverage data to disk:
+ *
+ *    llvm_gcda_start_file()
+ *      llvm_gcda_emit_function()
+ *      llvm_gcda_emit_arcs()
+ *      llvm_gcda_emit_function()
+ *      llvm_gcda_emit_arcs()
+ *      [... repeats for each function ...]
+ *    llvm_gcda_summary_info()
+ *    llvm_gcda_end_file()
+ *
+ * This design is much more stateless and unstructured than gcc's, and is
+ * intended to run at process exit.  This forces us to keep some local state
+ * about which module we're dealing with at the moment.  On the other hand, it
+ * also means we don't depend as much on how LLVM represents profiling data
+ * internally.
+ *
+ * See LLVM's lib/Transforms/Instrumentation/GCOVProfiling.cpp for more
+ * details on how this works, particularly GCOVProfiler::emitProfileArcs(),
+ * GCOVProfiler::insertCounterWriteout(), and
+ * GCOVProfiler::insertFlush().
+ */
+
+#define pr_fmt(fmt)	"gcov: " fmt
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "gcov.h"
+
+typedef void (*llvm_gcov_callback)(void);
+
+struct gcov_info {
+	struct list_head head;
+
+	const char *filename;
+	unsigned int version;
+	u32 checksum;
+
+	struct list_head functions;
+};
+
+struct gcov_fn_info {
+	struct list_head head;
+
+	u32 ident;
+	u32 checksum;
+	u8 use_extra_checksum;
+	u32 cfg_checksum;
+
+	u32 num_counters;
+	u64 *counters;
+	const char *function_name;
+};
+
+static struct gcov_info *current_info;
+
+static LIST_HEAD(clang_gcov_list);
+
+void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush)
+{
+	struct gcov_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+	if (!info)
+		return;
+
+	INIT_LIST_HEAD(&info->head);
+	INIT_LIST_HEAD(&info->functions);
+
+	mutex_lock(&gcov_lock);
+
+	list_add_tail(&info->head, &clang_gcov_list);
+	current_info = info;
+	writeout();
+	current_info = NULL;
+	if (gcov_events_enabled)
+		gcov_event(GCOV_ADD, info);
+
+	mutex_unlock(&gcov_lock);
+}
+EXPORT_SYMBOL(llvm_gcov_init);
+
+void llvm_gcda_start_file(const char *orig_filename, const char version[4],
+		u32 checksum)
+{
+	current_info->filename = orig_filename;
+	memcpy(&current_info->version, version, sizeof(current_info->version));
+	current_info->checksum = checksum;
+}
+EXPORT_SYMBOL(llvm_gcda_start_file);
+
+void llvm_gcda_emit_function(u32 ident, const char *function_name,
+		u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum)
+{
+	struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+	if (!info)
+		return;
+
+	INIT_LIST_HEAD(&info->head);
+	info->ident = ident;
+	info->checksum = func_checksum;
+	info->use_extra_checksum = use_extra_checksum;
+	info->cfg_checksum = cfg_checksum;
+	if (function_name)
+		info->function_name = kstrdup(function_name, GFP_KERNEL);
+
+	list_add_tail(&info->head, &current_info->functions);
+}
+EXPORT_SYMBOL(llvm_gcda_emit_function);
+
+void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
+{
+	struct gcov_fn_info *info = list_last_entry(&current_info->functions,
+			struct gcov_fn_info, head);
+
+	info->num_counters = num_counters;
+	info->counters = counters;
+}
+EXPORT_SYMBOL(llvm_gcda_emit_arcs);
+
+void llvm_gcda_summary_info(void)
+{
+}
+EXPORT_SYMBOL(llvm_gcda_summary_info);
+
+void llvm_gcda_end_file(void)
+{
+}
+EXPORT_SYMBOL(llvm_gcda_end_file);
+
+/**
+ * gcov_info_filename - return info filename
+ * @info: profiling data set
+ */
+const char *gcov_info_filename(struct gcov_info *info)
+{
+	return info->filename;
+}
+
+/**
+ * gcov_info_version - return info version
+ * @info: profiling data set
+ */
+unsigned int gcov_info_version(struct gcov_info *info)
+{
+	return info->version;
+}
+
+/**
+ * gcov_info_next - return next profiling data set
+ * @info: profiling data set
+ *
+ * Returns next gcov_info following @info or first gcov_info in the chain if
+ * @info is %NULL.
+ */
+struct gcov_info *gcov_info_next(struct gcov_info *info)
+{
+	if (!info)
+		return list_first_entry_or_null(&clang_gcov_list,
+				struct gcov_info, head);
+	if (list_is_last(&info->head, &clang_gcov_list))
+		return NULL;
+	return list_next_entry(info, head);
+}
+
+/**
+ * gcov_info_link - link/add profiling data set to the list
+ * @info: profiling data set
+ */
+void gcov_info_link(struct gcov_info *info)
+{
+	list_add_tail(&info->head, &clang_gcov_list);
+}
+
+/**
+ * gcov_info_unlink - unlink/remove profiling data set from the list
+ * @prev: previous profiling data set
+ * @info: profiling data set
+ */
+void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info)
+{
+	/* Generic code unlinks while iterating. */
+	__list_del_entry(&info->head);
+}
+
+/**
+ * gcov_info_within_module - check if a profiling data set belongs to a module
+ * @info: profiling data set
+ * @mod: module
+ *
+ * Returns true if the profiling data belongs to the module, false otherwise.
+ */
+bool gcov_info_within_module(struct gcov_info *info, struct module *mod)
+{
+	return within_module((unsigned long)info->filename, mod);
+}
+
+/* Symbolic links to be created for each profiling data file. */
+const struct gcov_link gcov_link[] = {
+	{ OBJ_TREE, "gcno" },	/* Link to .gcno file in $(objtree). */
+	{ 0, NULL},
+};
+
+/**
+ * gcov_info_reset - reset profiling data to zero
+ * @info: profiling data set
+ */
+void gcov_info_reset(struct gcov_info *info)
+{
+	struct gcov_fn_info *fn;
+
+	list_for_each_entry(fn, &info->functions, head)
+		memset(fn->counters, 0,
+				sizeof(fn->counters[0]) * fn->num_counters);
+}
+
+/**
+ * gcov_info_is_compatible - check if profiling data can be added
+ * @info1: first profiling data set
+ * @info2: second profiling data set
+ *
+ * Returns non-zero if profiling data can be added, zero otherwise.
+ */
+int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
+{
+	struct gcov_fn_info *fn_ptr1 = list_first_entry_or_null(
+			&info1->functions, struct gcov_fn_info, head);
+	struct gcov_fn_info *fn_ptr2 = list_first_entry_or_null(
+			&info2->functions, struct gcov_fn_info, head);
+
+	if (info1->checksum != info2->checksum)
+		return false;
+	if (!fn_ptr1 || !fn_ptr2)
+		return fn_ptr1 == fn_ptr2;
+	while (!list_is_last(&fn_ptr1->head, &info1->functions) &&
+		!list_is_last(&fn_ptr2->head, &info2->functions)) {
+		if (fn_ptr1->checksum != fn_ptr2->checksum)
+			return false;
+		if (fn_ptr1->use_extra_checksum != fn_ptr2->use_extra_checksum)
+			return false;
+		if (fn_ptr1->use_extra_checksum &&
+			fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
+			return false;
+		fn_ptr1 = list_next_entry(fn_ptr1, head);
+		fn_ptr2 = list_next_entry(fn_ptr2, head);
+	}
+	return list_is_last(&fn_ptr1->head, &info1->functions) &&
+		list_is_last(&fn_ptr2->head, &info2->functions);
+}
+
+/**
+ * gcov_info_add - add up profiling data
+ * @dst: profiling data set to which data is added
+ * @src: profiling data set which is added
+ *
+ * Adds profiling counts of @src to @dst.
+ */
+void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
+{
+	struct gcov_fn_info *dfn_ptr;
+	struct gcov_fn_info *sfn_ptr = list_first_entry_or_null(&src->functions,
+			struct gcov_fn_info, head);
+
+	list_for_each_entry(dfn_ptr, &dst->functions, head) {
+		u32 i;
+
+		for (i = 0; i < sfn_ptr->num_counters; i++)
+			dfn_ptr->counters[i] += sfn_ptr->counters[i];
+
+		/* advance in lockstep; without this every destination
+		 * function would accumulate the first source function */
+		sfn_ptr = list_next_entry(sfn_ptr, head);
+	}
+}
+
+static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
+{
+	size_t cv_size; /* counter values size */
+	struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn),
+			GFP_KERNEL);
+	if (!fn_dup)
+		return NULL;
+	INIT_LIST_HEAD(&fn_dup->head);
+
+	fn_dup->function_name = kstrdup(fn->function_name, GFP_KERNEL);
+	if (!fn_dup->function_name)
+		goto err_name;
+
+	cv_size = fn->num_counters * sizeof(fn->counters[0]);
+	fn_dup->counters = vmalloc(cv_size);
+	if (!fn_dup->counters)
+		goto err_counters;
+	memcpy(fn_dup->counters, fn->counters, cv_size);
+
+	return fn_dup;
+
+err_counters:
+	kfree(fn_dup->function_name);
+err_name:
+	kfree(fn_dup);
+	return NULL;
+}
+
+/**
+ * gcov_info_dup - duplicate profiling data set
+ * @info: profiling data set to duplicate
+ *
+ * Return newly allocated duplicate on success, %NULL on error.
+ */
+struct gcov_info *gcov_info_dup(struct gcov_info *info)
+{
+	struct gcov_info *dup;
+	struct gcov_fn_info *fn;
+
+	dup = kmemdup(info, sizeof(*dup), GFP_KERNEL);
+	if (!dup)
+		return NULL;
+	INIT_LIST_HEAD(&dup->head);
+	INIT_LIST_HEAD(&dup->functions);
+	dup->filename = kstrdup(info->filename, GFP_KERNEL);
+	if (!dup->filename)
+		goto err;
+
+	list_for_each_entry(fn, &info->functions, head) {
+		struct gcov_fn_info *fn_dup = gcov_fn_info_dup(fn);
+
+		if (!fn_dup)
+			goto err;
+		list_add_tail(&fn_dup->head, &dup->functions);
+	}
+
+	return dup;
+
+err:
+	gcov_info_free(dup);
+	return NULL;
+}
+
+/**
+ * gcov_info_free - release memory for profiling data set duplicate
+ * @info: profiling data set duplicate to free
+ */
+void gcov_info_free(struct gcov_info *info)
+{
+	struct gcov_fn_info *fn, *tmp;
+
+	list_for_each_entry_safe(fn, tmp, &info->functions, head) {
+		kfree(fn->function_name);
+		vfree(fn->counters);
+		list_del(&fn->head);
+		kfree(fn);
+	}
+	kfree(info->filename);
+	kfree(info);
+}
+
+#define ITER_STRIDE	PAGE_SIZE
+
+/**
+ * struct gcov_iterator - specifies current file position in logical records
+ * @info: associated profiling data
+ * @buffer: buffer containing file data
+ * @size: size of buffer
+ * @pos: current position in file
+ */
+struct gcov_iterator {
+	struct gcov_info *info;
+	void *buffer;
+	size_t size;
+	loff_t pos;
+};
+
+/**
+ * store_gcov_u32 - store 32 bit number in gcov format to buffer
+ * @buffer: target buffer or NULL
+ * @off: offset into the buffer
+ * @v: value to be stored
+ *
+ * Number format defined by gcc: numbers are recorded in the 32 bit
+ * unsigned binary form of the endianness of the machine generating the
+ * file. Returns the number of bytes stored. If @buffer is %NULL, doesn't
+ * store anything.
+ */
+static size_t store_gcov_u32(void *buffer, size_t off, u32 v)
+{
+	u32 *data;
+
+	if (buffer) {
+		data = buffer + off;
+		*data = v;
+	}
+
+	return sizeof(*data);
+}
+
+/**
+ * store_gcov_u64 - store 64 bit number in gcov format to buffer
+ * @buffer: target buffer or NULL
+ * @off: offset into the buffer
+ * @v: value to be stored
+ *
+ * Number format defined by gcc: numbers are recorded in the 32 bit
+ * unsigned binary form of the endianness of the machine generating the
+ * file. 64 bit numbers are stored as two 32 bit numbers, the low part
+ * first. Returns the number of bytes stored. If @buffer is %NULL, doesn't store
+ * anything.
+ */
+static size_t store_gcov_u64(void *buffer, size_t off, u64 v)
+{
+	u32 *data;
+
+	if (buffer) {
+		data = buffer + off;
+
+		data[0] = (v & 0xffffffffUL);
+		data[1] = (v >> 32);
+	}
+
+	return sizeof(*data) * 2;
+}
+
+/**
+ * convert_to_gcda - convert profiling data set to gcda file format
+ * @buffer: the buffer to store file data or %NULL if no data should be stored
+ * @info: profiling data set to be converted
+ *
+ * Returns the number of bytes that were/would have been stored into the buffer.
+ */
+static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
+{
+	struct gcov_fn_info *fi_ptr;
+	size_t pos = 0;
+
+	/* File header. */
+	pos += store_gcov_u32(buffer, pos, GCOV_DATA_MAGIC);
+	pos += store_gcov_u32(buffer, pos, info->version);
+	pos += store_gcov_u32(buffer, pos, info->checksum);
+
+	list_for_each_entry(fi_ptr, &info->functions, head) {
+		u32 i;
+		u32 len = 2;
+
+		if (fi_ptr->use_extra_checksum)
+			len++;
+
+		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
+		pos += store_gcov_u32(buffer, pos, len);
+		pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
+		pos += store_gcov_u32(buffer, pos, fi_ptr->checksum);
+		if (fi_ptr->use_extra_checksum)
+			pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
+
+		pos += store_gcov_u32(buffer, pos, GCOV_TAG_COUNTER_BASE);
+		pos += store_gcov_u32(buffer, pos, fi_ptr->num_counters * 2);
+		for (i = 0; i < fi_ptr->num_counters; i++)
+			pos += store_gcov_u64(buffer, pos, fi_ptr->counters[i]);
+	}
+
+	return pos;
+}
+
+/**
+ * gcov_iter_new - allocate and initialize profiling data iterator
+ * @info: profiling data set to be iterated
+ *
+ * Return file iterator on success, %NULL otherwise.
+ */
+struct gcov_iterator *gcov_iter_new(struct gcov_info *info)
+{
+	struct gcov_iterator *iter;
+
+	iter = kzalloc(sizeof(struct gcov_iterator), GFP_KERNEL);
+	if (!iter)
+		goto err_free;
+
+	iter->info = info;
+	/* Dry-run to get the actual buffer size. */
+	iter->size = convert_to_gcda(NULL, info);
+	iter->buffer = vmalloc(iter->size);
+	if (!iter->buffer)
+		goto err_free;
+
+	convert_to_gcda(iter->buffer, info);
+
+	return iter;
+
+err_free:
+	kfree(iter);
+	return NULL;
+}
+
+
+/**
+ * gcov_iter_free - release memory for file iterator
+ * @iter: file iterator to free
+ */
+void gcov_iter_free(struct gcov_iterator *iter)
+{
+	vfree(iter->buffer);
+	kfree(iter);
+}
+
+/**
+ * gcov_iter_get_info - return profiling data set for given file iterator
+ * @iter: file iterator
+ */
+struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter)
+{
+	return iter->info;
+}
+
+/**
+ * gcov_iter_start - reset file iterator to starting position
+ * @iter: file iterator
+ */
+void gcov_iter_start(struct gcov_iterator *iter)
+{
+	iter->pos = 0;
+}
+
+/**
+ * gcov_iter_next - advance file iterator to next logical record
+ * @iter: file iterator
+ *
+ * Return zero if new position is valid, non-zero if iterator has reached end.
+ */
+int gcov_iter_next(struct gcov_iterator *iter)
+{
+	if (iter->pos < iter->size)
+		iter->pos += ITER_STRIDE;
+
+	if (iter->pos >= iter->size)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * gcov_iter_write - write data for current pos to seq_file
+ * @iter: file iterator
+ * @seq: seq_file handle
+ *
+ * Return zero on success, non-zero otherwise.
+ */
+int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
+{
+	size_t len;
+
+	if (iter->pos >= iter->size)
+		return -EINVAL;
+
+	len = ITER_STRIDE;
+	if (iter->pos + len > iter->size)
+		len = iter->size - iter->pos;
+
+	seq_write(seq, iter->buffer + iter->pos, len);
+
+	return 0;
+}
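Note the sizing idiom: store_gcov_u32()/store_gcov_u64() return their width even when @buffer is NULL, so convert_to_gcda() doubles as its own size calculator and gcov_iter_new() can size and fill with one code path. The idiom in general form (serialize() stands in for convert_to_gcda(); a sketch):

	size_t need = serialize(NULL, obj);	/* dry run: size only */
	void *buf = vmalloc(need);

	if (buf)
		serialize(buf, obj);		/* real run: same walk, now storing */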
diff --git a/kernel/gcov/gcc_3_4.c b/kernel/gcov/gcc_3_4.c
index 27bc88a..f368cde 100644
--- a/kernel/gcov/gcc_3_4.c
+++ b/kernel/gcov/gcc_3_4.c
@@ -136,6 +136,18 @@
 		gcov_info_head = info->next;
 }
 
+/**
+ * gcov_info_within_module - check if a profiling data set belongs to a module
+ * @info: profiling data set
+ * @mod: module
+ *
+ * Returns true if the profiling data belongs to the module, false otherwise.
+ */
+bool gcov_info_within_module(struct gcov_info *info, struct module *mod)
+{
+	return within_module((unsigned long)info, mod);
+}
+
 /* Symbolic links to be created for each profiling data file. */
 const struct gcov_link gcov_link[] = {
 	{ OBJ_TREE, "gcno" },	/* Link to .gcno file in $(objtree). */
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 46a18e7..00564f4 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -149,6 +149,18 @@
 		gcov_info_head = info->next;
 }
 
+/**
+ * gcov_info_within_module - check if a profiling data set belongs to a module
+ * @info: profiling data set
+ * @mod: module
+ *
+ * Returns true if the profiling data belongs to the module, false otherwise.
+ */
+bool gcov_info_within_module(struct gcov_info *info, struct module *mod)
+{
+	return within_module((unsigned long)info, mod);
+}
+
 /* Symbolic links to be created for each profiling data file. */
 const struct gcov_link gcov_link[] = {
 	{ OBJ_TREE, "gcno" },	/* Link to .gcno file in $(objtree). */
diff --git a/kernel/gcov/gcc_base.c b/kernel/gcov/gcc_base.c
new file mode 100644
index 0000000..3cf736b
--- /dev/null
+++ b/kernel/gcov/gcc_base.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include "gcov.h"
+
+/*
+ * __gcov_init is called by gcc-generated constructor code for each object
+ * file compiled with -fprofile-arcs.
+ */
+void __gcov_init(struct gcov_info *info)
+{
+	static unsigned int gcov_version;
+
+	mutex_lock(&gcov_lock);
+	if (gcov_version == 0) {
+		gcov_version = gcov_info_version(info);
+		/*
+		 * Printing gcc's version magic may prove useful for debugging
+		 * incompatibility reports.
+		 */
+		pr_info("version magic: 0x%x\n", gcov_version);
+	}
+	/*
+	 * Add new profiling data structure to list and inform event
+	 * listener.
+	 */
+	gcov_info_link(info);
+	if (gcov_events_enabled)
+		gcov_event(GCOV_ADD, info);
+	mutex_unlock(&gcov_lock);
+}
+EXPORT_SYMBOL(__gcov_init);
+
+/*
+ * These functions may be referenced by gcc-generated profiling code but serve
+ * no function for kernel profiling.
+ */
+void __gcov_flush(void)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_flush);
+
+void __gcov_merge_add(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_add);
+
+void __gcov_merge_single(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_single);
+
+void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_delta);
+
+void __gcov_merge_ior(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_ior);
+
+void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_time_profile);
+
+void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_icall_topn);
+
+void __gcov_exit(void)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_exit);
diff --git a/kernel/gcov/gcov.h b/kernel/gcov/gcov.h
index 92c8e22..c69539d 100644
--- a/kernel/gcov/gcov.h
+++ b/kernel/gcov/gcov.h
@@ -14,6 +14,7 @@
 #ifndef GCOV_H
 #define GCOV_H GCOV_H
 
+#include <linux/module.h>
 #include <linux/types.h>
 
 /*
@@ -45,6 +46,7 @@
 struct gcov_info *gcov_info_next(struct gcov_info *info);
 void gcov_info_link(struct gcov_info *info);
 void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info);
+bool gcov_info_within_module(struct gcov_info *info, struct module *mod);
 
 /* Base interface. */
 enum gcov_action {
@@ -82,4 +84,7 @@
 };
 extern const struct gcov_link gcov_link[];
 
+extern int gcov_events_enabled;
+extern struct mutex gcov_lock;
+
 #endif /* GCOV_H */
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index c7e79d8..58125b7 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -15,6 +15,7 @@
 #include <linux/lockdep.h>
 #include <linux/export.h>
 #include <linux/sysctl.h>
+#include <linux/suspend.h>
 #include <linux/utsname.h>
 #include <trace/events/sched.h>
 #include <linux/sched/sysctl.h>
@@ -233,6 +234,28 @@
 }
 EXPORT_SYMBOL_GPL(reset_hung_task_detector);
 
+static bool hung_detector_suspended;
+
+static int hungtask_pm_notify(struct notifier_block *self,
+			      unsigned long action, void *hcpu)
+{
+	switch (action) {
+	case PM_SUSPEND_PREPARE:
+	case PM_HIBERNATION_PREPARE:
+	case PM_RESTORE_PREPARE:
+		hung_detector_suspended = true;
+		break;
+	case PM_POST_SUSPEND:
+	case PM_POST_HIBERNATION:
+	case PM_POST_RESTORE:
+		hung_detector_suspended = false;
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
 /*
  * kthread which checks for tasks stuck in D state
  */
@@ -247,7 +270,8 @@
 		long t = hung_timeout_jiffies(hung_last_checked, timeout);
 
 		if (t <= 0) {
-			if (!atomic_xchg(&reset_hung_task, 0))
+			if (!atomic_xchg(&reset_hung_task, 0) &&
+			    !hung_detector_suspended)
 				check_hung_uninterruptible_tasks(timeout);
 			hung_last_checked = jiffies;
 			continue;
@@ -261,6 +285,10 @@
 static int __init hung_task_init(void)
 {
 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+	/* Disable hung task detector on suspend */
+	pm_notifier(hungtask_pm_notify, 0);
+
 	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");
 
 	return 0;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f30110e..9f13667 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -729,7 +729,11 @@
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
-	kstat_incr_irqs_this_cpu(desc);
+	/*
+	 * PER CPU interrupts are not serialized. Do not touch
+	 * desc->tot_count.
+	 */
+	__kstat_incr_irqs_this_cpu(desc);
 
 	if (chip->irq_ack)
 		chip->irq_ack(&desc->irq_data);
@@ -758,7 +762,11 @@
 	unsigned int irq = irq_desc_get_irq(desc);
 	irqreturn_t res;
 
-	kstat_incr_irqs_this_cpu(desc);
+	/*
+	 * PER CPU interrupts are not serialized. Do not touch
+	 * desc->tot_count.
+	 */
+	__kstat_incr_irqs_this_cpu(desc);
 
 	if (chip->irq_ack)
 		chip->irq_ack(&desc->irq_data);
@@ -1134,6 +1142,10 @@
 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
 {
 	data = data->parent_data;
+
+	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+		return 0;
+
 	if (data->chip->irq_set_wake)
 		return data->chip->irq_set_wake(data, on);
 
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index bc226e7..22e3f29 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -199,12 +199,18 @@
 
 #undef __irqd_to_state
 
-static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
+static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
 {
 	__this_cpu_inc(*desc->kstat_irqs);
 	__this_cpu_inc(kstat.irqs_sum);
 }
 
+static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
+{
+	__kstat_incr_irqs_this_cpu(desc);
+	desc->tot_count++;
+}
+
 static inline int irq_desc_get_node(struct irq_desc *desc)
 {
 	return irq_common_data_get_node(&desc->irq_common_data);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 77977f55df..5e0ea17 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -109,6 +109,7 @@
 	desc->depth = 1;
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
+	desc->tot_count = 0;
 	desc->name = NULL;
 	desc->owner = owner;
 	for_each_possible_cpu(cpu)
@@ -880,11 +881,15 @@
 unsigned int kstat_irqs(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	int cpu;
 	unsigned int sum = 0;
+	int cpu;
 
 	if (!desc || !desc->kstat_irqs)
 		return 0;
+	if (!irq_settings_is_per_cpu_devid(desc) &&
+	    !irq_settings_is_per_cpu(desc))
+		return desc->tot_count;
+
 	for_each_possible_cpu(cpu)
 		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
 	return sum;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 15d609b..be7f489 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -332,11 +332,10 @@
 	desc->affinity_notify = notify;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
-	if (!notify && old_notify)
+	if (old_notify) {
 		cancel_work_sync(&old_notify->work);
-
-	if (old_notify)
 		kref_put(&old_notify->kref, old_notify->release);
+	}
 
 	return 0;
 }
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f580352..e2845dd 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -668,7 +668,6 @@
 static int reuse_unused_kprobe(struct kprobe *ap)
 {
 	struct optimized_kprobe *op;
-	int ret;
 
 	BUG_ON(!kprobe_unused(ap));
 	/*
@@ -682,9 +681,8 @@
 	/* Enable the probe again */
 	ap->flags &= ~KPROBE_FLAG_DISABLED;
 	/* Optimize it again (remove from op->list) */
-	ret = kprobe_optready(ap);
-	if (ret)
-		return ret;
+	if (!kprobe_optready(ap))
+		return -EINVAL;
 
 	optimize_kprobe(ap);
 	return 0;
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index f4a2fb8..8dfa7ea 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -119,6 +119,7 @@
 {
 	struct rwsem_waiter *waiter, *tmp;
 	long oldcount, woken = 0, adjustment = 0;
+	struct list_head wlist;
 
 	/*
 	 * Take a peek at the queue head waiter such that we can determine
@@ -177,18 +178,42 @@
 	 * of the queue. We know that woken will be at least 1 as we accounted
 	 * for above. Note we increment the 'active part' of the count by the
 	 * number of readers before waking any processes up.
+	 *
+	 * We have to do the wakeup in two passes to prevent the reader
+	 * count from being decremented before it is incremented: a
+	 * to-be-woken waiter may not have slept yet, so it may see
+	 * waiter->task cleared, finish its critical section and do an
+	 * unlock before the reader count increment.
+	 *
+	 * 1) Collect the read-waiters in a separate list, count them and
+	 *    fully increment the reader count in rwsem.
+	 * 2) For each waiter in the new list, clear waiter->task and
+	 *    put it into wake_q to be woken up later.
 	 */
-	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
-		struct task_struct *tsk;
-
+	list_for_each_entry(waiter, &sem->wait_list, list) {
 		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
 			break;
 
 		woken++;
-		tsk = waiter->task;
+	}
+	list_cut_before(&wlist, &sem->wait_list, &waiter->list);
 
+	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+	if (list_empty(&sem->wait_list)) {
+		/* hit end of list above */
+		adjustment -= RWSEM_WAITING_BIAS;
+	}
+
+	if (adjustment)
+		atomic_long_add(adjustment, &sem->count);
+
+	/* 2nd pass */
+	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
+		struct task_struct *tsk;
+
+		tsk = waiter->task;
 		get_task_struct(tsk);
-		list_del(&waiter->list);
+
 		/*
 		 * Ensure calling get_task_struct() before setting the reader
 		 * waiter to nil such that rwsem_down_read_failed() cannot
@@ -204,15 +229,6 @@
 		/* wake_q_add() already take the task ref */
 		put_task_struct(tsk);
 	}
-
-	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
-	if (list_empty(&sem->wait_list)) {
-		/* hit end of list above */
-		adjustment -= RWSEM_WAITING_BIAS;
-	}
-
-	if (adjustment)
-		atomic_long_add(adjustment, &sem->count);
 }
 
 /*
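Why the two passes matter can be seen as a timeline; the sketch below reconstructs the race the comment above describes (illustrative, not code from the patch):

/*
 * The window the two-pass scheme closes (sketch):
 *
 *   waker (single-pass, old code)      to-be-woken reader R
 *   -----------------------------      --------------------
 *   waiter->task = NULL
 *                                      sees task == NULL before ever
 *                                      sleeping, runs its critical
 *                                      section, then up_read():
 *                                          count -= RWSEM_ACTIVE_READ_BIAS
 *   count += woken * RWSEM_ACTIVE_READ_BIAS
 *
 * R's decrement can land before the waker's batched increment, leaving
 * the active count transiently wrong. Publishing the full increment in
 * pass 1, before any waiter->task is cleared in pass 2, orders every
 * decrement after the increment.
 */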
diff --git a/kernel/panic.c b/kernel/panic.c
index 278d64b..bebc117 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -608,7 +608,7 @@
  */
 __visible void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+	panic("stack-protector: Kernel stack is corrupted in: %pB\n",
 		__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index f39a7be9..efba851 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -258,6 +258,9 @@
 
 static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 {
+	if (mode & PTRACE_MODE_SCHED)
+		return false;
+
 	if (mode & PTRACE_MODE_NOAUDIT)
 		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
 	else
@@ -325,9 +328,16 @@
 	     !ptrace_has_cap(mm->user_ns, mode)))
 	    return -EPERM;
 
+	if (mode & PTRACE_MODE_SCHED)
+		return 0;
 	return security_ptrace_access_check(task, mode);
 }
 
+bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
+{
+	return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
+}
+
 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
 	int err;
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 084c41c..7fd74ef 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1718,15 +1718,23 @@
 }
 
 /*
- * Awaken the grace-period kthread for the specified flavor of RCU.
- * Don't do a self-awaken, and don't bother awakening when there is
- * nothing for the grace-period kthread to do (as in several CPUs
- * raced to awaken, and we lost), and finally don't try to awaken
- * a kthread that has not yet been created.
+ * Awaken the grace-period kthread.  Don't do a self-awaken (unless in
+ * an interrupt or softirq handler), and don't bother awakening when there
+ * is nothing for the grace-period kthread to do (as in several CPUs raced
+ * to awaken, and we lost), and finally don't try to awaken a kthread that
+ * has not yet been created.  If all those checks are passed, track some
+ * debug information and awaken.
+ *
+ * So why do the self-wakeup when in an interrupt or softirq handler
+ * in the grace-period kthread's context?  Because the kthread might have
+ * been interrupted just as it was going to sleep, and just after the final
+ * pre-sleep check of the awaken condition.  In this case, a wakeup really
+ * is required, and is therefore supplied.
  */
 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 {
-	if (current == rsp->gp_kthread ||
+	if ((current == rsp->gp_kthread &&
+	     !in_interrupt() && !in_serving_softirq()) ||
 	    !READ_ONCE(rsp->gp_flags) ||
 	    !rsp->gp_kthread)
 		return;
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
old mode 100644
new mode 100755
index 38cbc0d..134b6f8
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -28,3 +28,4 @@
 obj-$(CONFIG_CPU_FREQ) += cpufreq.o
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
+obj-$(CONFIG_PSI) += psi.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4910292..531adb8 100755
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -77,6 +77,8 @@
 #include <linux/prefetch.h>
 #include <linux/irq.h>
 #include <linux/cpufreq_times.h>
+#include <linux/sched/loadavg.h>
+#include <linux/cgroup-defs.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -176,21 +178,6 @@
 }
 
 /*
- * this_rq_lock - lock this runqueue and disable interrupts.
- */
-static struct rq *this_rq_lock(void)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	local_irq_disable();
-	rq = this_rq();
-	raw_spin_lock(&rq->lock);
-
-	return rq;
-}
-
-/*
  * __task_rq_lock - lock the rq @p resides on.
  */
 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
@@ -204,7 +191,7 @@
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			rf->cookie = lockdep_pin_lock(&rq->lock);
+			rq_pin_lock(rq, rf);
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
@@ -244,7 +231,7 @@
 		 * pair with the WMB to ensure we must then also see migrating.
 		 */
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			rf->cookie = lockdep_pin_lock(&rq->lock);
+			rq_pin_lock(rq, rf);
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
@@ -774,8 +761,10 @@
 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_rq_clock(rq);
-	if (!(flags & ENQUEUE_RESTORE))
+	if (!(flags & ENQUEUE_RESTORE)) {
 		sched_info_queued(rq, p);
+		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
+	}
 	p->sched_class->enqueue_task(rq, p, flags);
 	walt_update_last_enqueue(p);
 	trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]);
@@ -784,8 +773,10 @@
 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_rq_clock(rq);
-	if (!(flags & DEQUEUE_SAVE))
+	if (!(flags & DEQUEUE_SAVE)) {
 		sched_info_dequeued(rq, p);
+		psi_dequeue(p, flags & DEQUEUE_SLEEP);
+	}
 	p->sched_class->dequeue_task(rq, p, flags);
 	trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]);
 }
@@ -1239,9 +1230,9 @@
 		 * OK, since we're going to drop the lock immediately
 		 * afterwards anyway.
 		 */
-		lockdep_unpin_lock(&rq->lock, rf.cookie);
+		rq_unpin_lock(rq, &rf);
 		rq = move_queued_task(rq, p, dest_cpu);
-		lockdep_repin_lock(&rq->lock, rf.cookie);
+		rq_repin_lock(rq, &rf);
 	}
 out:
 	task_rq_unlock(rq, p, &rf);
@@ -1757,7 +1748,7 @@
  * Mark the task runnable and perform wakeup-preemption.
  */
 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
-			   struct pin_cookie cookie)
+			   struct rq_flags *rf)
 {
 	check_preempt_curr(rq, p, wake_flags);
 	p->state = TASK_RUNNING;
@@ -1769,9 +1760,9 @@
 		 * Our task @p is fully woken up and running; so its safe to
 		 * drop the rq->lock, hereafter rq is only used for statistics.
 		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, rf);
 		p->sched_class->task_woken(rq, p);
-		lockdep_repin_lock(&rq->lock, cookie);
+		rq_repin_lock(rq, rf);
 	}
 
 	if (rq->idle_stamp) {
@@ -1790,7 +1781,7 @@
 
 static void
 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
-		 struct pin_cookie cookie)
+		 struct rq_flags *rf)
 {
 	int en_flags = ENQUEUE_WAKEUP;
 
@@ -1805,7 +1796,7 @@
 #endif
 
 	ttwu_activate(rq, p, en_flags);
-	ttwu_do_wakeup(rq, p, wake_flags, cookie);
+	ttwu_do_wakeup(rq, p, wake_flags, rf);
 }
 
 /*
@@ -1824,7 +1815,7 @@
 	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
-		ttwu_do_wakeup(rq, p, wake_flags, rf.cookie);
+		ttwu_do_wakeup(rq, p, wake_flags, &rf);
 		ret = 1;
 	}
 	__task_rq_unlock(rq, &rf);
@@ -1837,15 +1828,15 @@
 {
 	struct rq *rq = this_rq();
 	struct llist_node *llist = llist_del_all(&rq->wake_list);
-	struct pin_cookie cookie;
 	struct task_struct *p;
 	unsigned long flags;
+	struct rq_flags rf;
 
 	if (!llist)
 		return;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	cookie = lockdep_pin_lock(&rq->lock);
+	rq_pin_lock(rq, &rf);
 
 	while (llist) {
 		int wake_flags = 0;
@@ -1856,10 +1847,10 @@
 		if (p->sched_remote_wakeup)
 			wake_flags = WF_MIGRATED;
 
-		ttwu_do_activate(rq, p, wake_flags, cookie);
+		ttwu_do_activate(rq, p, wake_flags, &rf);
 	}
 
-	lockdep_unpin_lock(&rq->lock, cookie);
+	rq_unpin_lock(rq, &rf);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1959,7 +1950,7 @@
 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct pin_cookie cookie;
+	struct rq_flags rf;
 
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
@@ -1970,9 +1961,9 @@
 #endif
 
 	raw_spin_lock(&rq->lock);
-	cookie = lockdep_pin_lock(&rq->lock);
-	ttwu_do_activate(rq, p, wake_flags, cookie);
-	lockdep_unpin_lock(&rq->lock, cookie);
+	rq_pin_lock(rq, &rf);
+	ttwu_do_activate(rq, p, wake_flags, &rf);
+	rq_unpin_lock(rq, &rf);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -2191,6 +2182,7 @@
 	src_cpu = task_cpu(p);
 	if (src_cpu != cpu) {
 		wake_flags |= WF_MIGRATED;
+		psi_ttwu_dequeue(p);
 		set_task_cpu(p, cpu);
 		notif_required = true;
 	}
@@ -2225,7 +2217,7 @@
  * ensure that this_rq() is locked, @p is bound to this_rq() and not
  * the current task.
  */
-static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
+static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
 {
 	struct rq *rq = task_rq(p);
 
@@ -2246,11 +2238,11 @@
 		 * disabled avoiding further scheduler activity on it and we've
 		 * not yet picked a replacement task.
 		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, rf);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&p->pi_lock);
 		raw_spin_lock(&rq->lock);
-		lockdep_repin_lock(&rq->lock, cookie);
+		rq_repin_lock(rq, rf);
 	}
 
 	if (!(p->state & TASK_NORMAL))
@@ -2267,7 +2259,7 @@
 		note_task_waking(p, wallclock);
 	}
 
-	ttwu_do_wakeup(rq, p, 0, cookie);
+	ttwu_do_wakeup(rq, p, 0, rf);
 	ttwu_stat(p, smp_processor_id(), 0);
 out:
 	raw_spin_unlock(&p->pi_lock);
@@ -2724,9 +2716,9 @@
 		 * Nothing relies on rq->lock after this, so its fine to
 		 * drop it.
 		 */
-		lockdep_unpin_lock(&rq->lock, rf.cookie);
+		rq_unpin_lock(rq, &rf);
 		p->sched_class->task_woken(rq, p);
-		lockdep_repin_lock(&rq->lock, rf.cookie);
+		rq_repin_lock(rq, &rf);
 	}
 #endif
 	task_rq_unlock(rq, p, &rf);
@@ -2995,7 +2987,7 @@
  */
 static __always_inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
-	       struct task_struct *next, struct pin_cookie cookie)
+	       struct task_struct *next, struct rq_flags *rf)
 {
 	struct mm_struct *mm, *oldmm;
 
@@ -3027,7 +3019,7 @@
 	 * of the scheduler it's an obvious special-case), so we
 	 * do an early lockdep release here:
 	 */
-	lockdep_unpin_lock(&rq->lock, cookie);
+	rq_unpin_lock(rq, rf);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
 	/* Here we just switch the register state and the stack. */
@@ -3270,6 +3262,9 @@
 		flag = SCHED_CPUFREQ_WALT | SCHED_CPUFREQ_EARLY_DET;
 
 	cpufreq_update_util(rq, flag);
+
+	psi_task_tick(rq);
+
 	raw_spin_unlock(&rq->lock);
 
 	if (early_notif)
@@ -3491,7 +3486,7 @@
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	const struct sched_class *class = &fair_sched_class;
 	struct task_struct *p;
@@ -3502,20 +3497,20 @@
 	 */
 	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
-		p = fair_sched_class.pick_next_task(rq, prev, cookie);
+		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
 
 		/* assumes fair_sched_class->next == idle_sched_class */
 		if (unlikely(!p))
-			p = idle_sched_class.pick_next_task(rq, prev, cookie);
+			p = idle_sched_class.pick_next_task(rq, prev, rf);
 
 		return p;
 	}
 
 again:
 	for_each_class(class) {
-		p = class->pick_next_task(rq, prev, cookie);
+		p = class->pick_next_task(rq, prev, rf);
 		if (p) {
 			if (unlikely(p == RETRY_TASK))
 				goto again;
@@ -3569,7 +3564,7 @@
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
-	struct pin_cookie cookie;
+	struct rq_flags rf;
 	struct rq *rq;
 	int cpu;
 	u64 wallclock;
@@ -3593,7 +3588,7 @@
 	 */
 	smp_mb__before_spinlock();
 	raw_spin_lock(&rq->lock);
-	cookie = lockdep_pin_lock(&rq->lock);
+	rq_pin_lock(rq, &rf);
 
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
@@ -3615,7 +3610,7 @@
 
 				to_wakeup = wq_worker_sleeping(prev);
 				if (to_wakeup)
-					try_to_wake_up_local(to_wakeup, cookie);
+					try_to_wake_up_local(to_wakeup, &rf);
 			}
 		}
 		switch_count = &prev->nvcsw;
@@ -3624,12 +3619,12 @@
 	if (task_on_rq_queued(prev))
 		update_rq_clock(rq);
 
-	next = pick_next_task(rq, prev, cookie);
+	next = pick_next_task(rq, prev, &rf);
+	wallclock = sched_ktime_clock();
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
 
-	wallclock = sched_ktime_clock();
 	if (likely(prev != next)) {
 		if (!prev->on_rq)
 			prev->last_sleep_ts = wallclock;
@@ -3641,10 +3636,10 @@
 		++*switch_count;
 
 		trace_sched_switch(preempt, prev, next);
-		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
+		rq = context_switch(rq, prev, next, &rf); /* unlocks the rq */
 	} else {
 		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, &rf);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 
@@ -5144,7 +5139,10 @@
  */
 SYSCALL_DEFINE0(sched_yield)
 {
-	struct rq *rq = this_rq_lock();
+	struct rq_flags rf;
+	struct rq *rq;
+
+	rq = this_rq_lock_irq(&rf);
 
 	schedstat_inc(rq->yld_count);
 	current->sched_class->yield_task(rq);
@@ -5153,9 +5151,8 @@
 	 * Since we are going to call schedule() anyway, there's
 	 * no need to preempt or enable interrupts:
 	 */
-	__release(rq->lock);
-	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-	do_raw_spin_unlock(&rq->lock);
+	preempt_disable();
+	rq_unlock(rq, &rf);
 	sched_preempt_enable_no_resched();
 
 	schedule();
@@ -5815,7 +5812,7 @@
 {
 	struct rq *rq = dead_rq;
 	struct task_struct *next, *stop = rq->stop;
-	struct pin_cookie cookie;
+	struct rq_flags rf;
 	int dest_cpu;
 	unsigned int num_pinned_kthreads = 1; /* this thread */
 	LIST_HEAD(tasks);
@@ -5852,8 +5849,8 @@
 		/*
 		 * pick_next_task assumes pinned rq->lock.
 		 */
-		cookie = lockdep_pin_lock(&rq->lock);
-		next = pick_next_task(rq, &fake_task, cookie);
+		rq_pin_lock(rq, &rf);
+		next = pick_next_task(rq, &fake_task, &rf);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
@@ -5861,7 +5858,7 @@
 			!cpumask_intersects(&avail_cpus, &next->cpus_allowed)) {
 			detach_one_task(next, rq, &tasks);
 			num_pinned_kthreads += 1;
-			lockdep_unpin_lock(&rq->lock, cookie);
+			rq_unpin_lock(rq, &rf);
 			continue;
 		}
 
@@ -5874,7 +5871,7 @@
 		 * because !cpu_active at this point, which means load-balance
 		 * will not interfere. Also, stop-machine.
 		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, &rf);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&next->pi_lock);
 		raw_spin_lock(&rq->lock);
@@ -8005,11 +8002,22 @@
 	return 0;
 }
 
+#ifdef CONFIG_SCHED_SMT
+atomic_t sched_smt_present = ATOMIC_INIT(0);
+#endif
+
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+#ifdef CONFIG_SCHED_SMT
+	/*
+	 * When going up, increment the number of cores with SMT present.
+	 */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		atomic_inc(&sched_smt_present);
+#endif
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
@@ -8059,6 +8067,14 @@
 	else
 		synchronize_rcu();
 
+#ifdef CONFIG_SCHED_SMT
+	/*
+	 * When going down, decrement the number of cores with SMT present.
+	 */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		atomic_dec(&sched_smt_present);
+#endif
+
 	if (!sched_smp_initialized)
 		return 0;
 
@@ -8383,6 +8399,8 @@
 
 	init_schedstats();
 
+	psi_init();
+
 	scheduler_running = 1;
 }
 
@@ -9088,6 +9106,17 @@
 }
 #endif
 
+void threadgroup_change_begin(struct task_struct *tsk)
+{
+	might_sleep();
+	cgroup_threadgroup_change_begin(tsk);
+}
+
+void threadgroup_change_end(struct task_struct *tsk)
+{
+	cgroup_threadgroup_change_end(tsk);
+}
+
 #ifdef CONFIG_CGROUP_SCHED
 
 inline struct task_group *css_tg(struct cgroup_subsys_state *css)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d7c7dd6..ba9dce4 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -749,9 +749,9 @@
 		 * Nothing relies on rq->lock after this, so its safe to drop
 		 * rq->lock.
 		 */
-		lockdep_unpin_lock(&rq->lock, rf.cookie);
+		rq_unpin_lock(rq, &rf);
 		push_dl_task(rq);
-		lockdep_repin_lock(&rq->lock, rf.cookie);
+		rq_repin_lock(rq, &rf);
 	}
 #endif
 
@@ -1248,7 +1248,7 @@
 }
 
 struct task_struct *
-pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct sched_dl_entity *dl_se;
 	struct task_struct *p;
@@ -1263,9 +1263,9 @@
 		 * disabled avoiding further scheduler activity on it and we're
 		 * being very careful to re-start the picking loop.
 		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, rf);
 		pull_dl_task(rq);
-		lockdep_repin_lock(&rq->lock, cookie);
+		rq_repin_lock(rq, rf);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a stop task can slip in, in which case we need to
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 08391ff..3f863a1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2012,6 +2012,10 @@
 	if (p->last_task_numa_placement) {
 		delta = runtime - p->last_sum_exec_runtime;
 		*period = now - p->last_task_numa_placement;
+
+		/* Avoid time going backwards, prevent potential divide error: */
+		if (unlikely((s64)*period < 0))
+			*period = 0;
 	} else {
 		delta = p->se.avg.load_sum / p->se.load.weight;
 		*period = LOAD_AVG_MAX;
@@ -4671,12 +4675,15 @@
 	return HRTIMER_NORESTART;
 }
 
+extern const u64 max_cfs_quota_period;
+
 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
 	struct cfs_bandwidth *cfs_b =
 		container_of(timer, struct cfs_bandwidth, period_timer);
 	int overrun;
 	int idle = 0;
+	int count = 0;
 
 	raw_spin_lock(&cfs_b->lock);
 	for (;;) {
@@ -4684,6 +4691,28 @@
 		if (!overrun)
 			break;
 
+		if (++count > 3) {
+			u64 new, old = ktime_to_ns(cfs_b->period);
+
+			new = (old * 147) / 128; /* ~115% */
+			new = min(new, max_cfs_quota_period);
+
+			cfs_b->period = ns_to_ktime(new);
+
+			/* since max is 1s, this is limited to 1e9^2, which fits in u64 */
+			cfs_b->quota *= new;
+			cfs_b->quota = div64_u64(cfs_b->quota, old);
+
+			pr_warn_ratelimited(
+				"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
+				smp_processor_id(),
+				div_u64(new, NSEC_PER_USEC),
+				div_u64(cfs_b->quota, NSEC_PER_USEC));
+
+			/* reset count so we don't come right back in here */
+			count = 0;
+		}
+
 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
 	}
 	if (idle)
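
A quick worked example of the rescaling above (illustrative numbers, not from
the patch): a 10ms period that keeps overrunning grows by 147/128 (~15%) per
adjustment, and the quota is scaled by the same ratio so the quota/period
bandwidth stays constant. A minimal standalone userspace C sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period = 10000000ULL;		/* 10ms in ns */
	uint64_t quota = 5000000ULL;		/* 5ms in ns */
	const uint64_t max_period = 1000000000ULL; /* 1s cap */

	uint64_t new_period = period * 147 / 128;	/* ~115% */
	if (new_period > max_period)
		new_period = max_period;
	quota = quota * new_period / period;	/* keep the ratio */

	printf("period %llu -> %llu ns, quota -> %llu ns\n",
	       (unsigned long long)period, (unsigned long long)new_period,
	       (unsigned long long)quota);
	return 0;
}

One adjustment takes the period to ~11.48ms and the quota to ~5.74ms; repeated
overruns walk the period up toward the 1s max_cfs_quota_period cap.
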
@@ -5927,7 +5956,8 @@
 					cpu_count--;
 				}
 
-				if (cpumask_equal(sched_group_cpus(sg), sched_group_cpus(eenv->sg_top)))
+				if (cpumask_equal(sched_group_cpus(sg), sched_group_cpus(eenv->sg_top)) &&
+					sd->child)
 					goto next_cpu;
 
 			} while (sg = sg->next, sg != sd->groups);
@@ -7891,7 +7921,7 @@
 }
 
 static struct task_struct *
-pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct sched_entity *se;
@@ -8009,9 +8039,9 @@
 	 * further scheduler activity on it and we're being very careful to
 	 * re-start the picking loop.
 	 */
-	lockdep_unpin_lock(&rq->lock, cookie);
+	rq_unpin_lock(rq, rf);
 	new_tasks = idle_balance(rq);
-	lockdep_repin_lock(&rq->lock, cookie);
+	rq_repin_lock(rq, rf);
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
@@ -8702,10 +8732,10 @@
 	if (cfs_rq->last_h_load_update == now)
 		return;
 
-	cfs_rq->h_load_next = NULL;
+	WRITE_ONCE(cfs_rq->h_load_next, NULL);
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		cfs_rq->h_load_next = se;
+		WRITE_ONCE(cfs_rq->h_load_next, se);
 		if (cfs_rq->last_h_load_update == now)
 			break;
 	}
@@ -8715,7 +8745,7 @@
 		cfs_rq->last_h_load_update = now;
 	}
 
-	while ((se = cfs_rq->h_load_next) != NULL) {
+	while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
 		load = cfs_rq->h_load;
 		load = div64_ul(load * se->avg.load_avg,
 			cfs_rq_load_avg(cfs_rq) + 1);
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 5405d3f..0c00172 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -24,7 +24,7 @@
 }
 
 static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	put_prev_task(rq, prev);
 	update_idle_core(rq);
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index ec91fcc..5f3812c 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/export.h>
+#include <linux/sched/loadavg.h>
 
 #include "sched.h"
 
@@ -93,19 +94,73 @@
 	return delta;
 }
 
-/*
- * a1 = a0 * e + a * (1 - e)
+/**
+ * fixed_power_int - compute: x^n, in O(log n) time
+ *
+ * @x:         base of the power
+ * @frac_bits: fractional bits of @x
+ * @n:         power to raise @x to.
+ *
+ * By exploiting the relation between the definition of the natural power
+ * function: x^n := x*x*...*x (x multiplied by itself for n times), and
+ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
+ * (where: n_i \elem {0, 1}, the binary vector representing n),
+ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
+ * of course trivially computable in O(log_2 n), the length of our binary
+ * vector.
  */
 static unsigned long
-calc_load(unsigned long load, unsigned long exp, unsigned long active)
+fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
 {
-	unsigned long newload;
+	unsigned long result = 1UL << frac_bits;
 
-	newload = load * exp + active * (FIXED_1 - exp);
-	if (active >= load)
-		newload += FIXED_1-1;
+	if (n) {
+		for (;;) {
+			if (n & 1) {
+				result *= x;
+				result += 1UL << (frac_bits - 1);
+				result >>= frac_bits;
+			}
+			n >>= 1;
+			if (!n)
+				break;
+			x *= x;
+			x += 1UL << (frac_bits - 1);
+			x >>= frac_bits;
+		}
+	}
 
-	return newload / FIXED_1;
+	return result;
+}
+
+/*
+ * a1 = a0 * e + a * (1 - e)
+ *
+ * a2 = a1 * e + a * (1 - e)
+ *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
+ *    = a0 * e^2 + a * (1 - e) * (1 + e)
+ *
+ * a3 = a2 * e + a * (1 - e)
+ *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
+ *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
+ *
+ *  ...
+ *
+ * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
+ *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
+ *    = a0 * e^n + a * (1 - e^n)
+ *
+ * [1] application of the geometric series:
+ *
+ *              n         1 - x^(n+1)
+ *     S_n := \Sum x^i = -------------
+ *             i=0          1 - x
+ */
+unsigned long
+calc_load_n(unsigned long load, unsigned long exp,
+	    unsigned long active, unsigned int n)
+{
+	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
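
To make the fixed-point identity above concrete, here is a hedged userspace
sketch. It assumes the kernel's 11-bit fixed point (FSHIFT = 11, FIXED_1 =
2048) and a calc_load() matching the one in <linux/sched/loadavg.h>, which
this file now includes; 1981 is the 1/exp(2s/60s) decay factor that psi.c
defines below as EXP_60s:

#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)

/* mirrors the kernel's fixed_power_int() above */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	if (n) {
		for (;;) {
			if (n & 1) {
				result *= x;
				result += 1UL << (frac_bits - 1);
				result >>= frac_bits;
			}
			n >>= 1;
			if (!n)
				break;
			x *= x;
			x += 1UL << (frac_bits - 1);
			x >>= frac_bits;
		}
	}
	return result;
}

/* assumed to match calc_load() in <linux/sched/loadavg.h> */
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
	unsigned long newload = load * exp + active * (FIXED_1 - exp);

	if (active >= load)
		newload += FIXED_1 - 1;
	return newload / FIXED_1;
}

int main(void)
{
	unsigned long avg = 1024;	/* 0.50 in fixed point */
	/* fold 5 idle 2s periods into one decay step: e^5 */
	unsigned long e_n = fixed_power_int(1981, FSHIFT, 5);

	printf("decayed avg: %lu (was %lu)\n", calc_load(avg, e_n, 0), avg);
	return 0;
}
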
@@ -227,75 +282,6 @@
 	return delta;
 }
 
-/**
- * fixed_power_int - compute: x^n, in O(log n) time
- *
- * @x:         base of the power
- * @frac_bits: fractional bits of @x
- * @n:         power to raise @x to.
- *
- * By exploiting the relation between the definition of the natural power
- * function: x^n := x*x*...*x (x multiplied by itself for n times), and
- * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
- * (where: n_i \elem {0, 1}, the binary vector representing n),
- * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
- * of course trivially computable in O(log_2 n), the length of our binary
- * vector.
- */
-static unsigned long
-fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
-{
-	unsigned long result = 1UL << frac_bits;
-
-	if (n) {
-		for (;;) {
-			if (n & 1) {
-				result *= x;
-				result += 1UL << (frac_bits - 1);
-				result >>= frac_bits;
-			}
-			n >>= 1;
-			if (!n)
-				break;
-			x *= x;
-			x += 1UL << (frac_bits - 1);
-			x >>= frac_bits;
-		}
-	}
-
-	return result;
-}
-
-/*
- * a1 = a0 * e + a * (1 - e)
- *
- * a2 = a1 * e + a * (1 - e)
- *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
- *    = a0 * e^2 + a * (1 - e) * (1 + e)
- *
- * a3 = a2 * e + a * (1 - e)
- *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
- *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
- *
- *  ...
- *
- * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
- *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
- *    = a0 * e^n + a * (1 - e^n)
- *
- * [1] application of the geometric series:
- *
- *              n         1 - x^(n+1)
- *     S_n := \Sum x^i = -------------
- *             i=0          1 - x
- */
-static unsigned long
-calc_load_n(unsigned long load, unsigned long exp,
-	    unsigned long active, unsigned int n)
-{
-	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
-}
-
 /*
  * NO_HZ can leave us missing all per-cpu ticks calling
  * calc_load_account_active(), but since an idle CPU folds its delta into
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
new file mode 100644
index 0000000..ced7587
--- /dev/null
+++ b/kernel/sched/psi.c
@@ -0,0 +1,1280 @@
+/*
+ * Pressure stall information for CPU, memory and IO
+ *
+ * Copyright (c) 2018 Facebook, Inc.
+ * Author: Johannes Weiner <hannes@cmpxchg.org>
+ *
+ * Polling support by Suren Baghdasaryan <surenb@google.com>
+ * Copyright (c) 2018 Google, Inc.
+ *
+ * When CPU, memory and IO are contended, tasks experience delays that
+ * reduce throughput and introduce latencies into the workload. Memory
+ * and IO contention, in addition, can cause a full loss of forward
+ * progress in which the CPU goes idle.
+ *
+ * This code aggregates individual task delays into resource pressure
+ * metrics that indicate problems with both workload health and
+ * resource utilization.
+ *
+ *			Model
+ *
+ * The time in which a task can execute on a CPU is our baseline for
+ * productivity. Pressure expresses the amount of time in which this
+ * potential cannot be realized due to resource contention.
+ *
+ * This concept of productivity has two components: the workload and
+ * the CPU. To measure the impact of pressure on both, we define two
+ * contention states for a resource: SOME and FULL.
+ *
+ * In the SOME state of a given resource, one or more tasks are
+ * delayed on that resource. This affects the workload's ability to
+ * perform work, but the CPU may still be executing other tasks.
+ *
+ * In the FULL state of a given resource, all non-idle tasks are
+ * delayed on that resource such that nobody is advancing and the CPU
+ * goes idle. This leaves both workload and CPU unproductive.
+ *
+ * (Naturally, the FULL state doesn't exist for the CPU resource.)
+ *
+ *	SOME = nr_delayed_tasks != 0
+ *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
+ *
+ * The percentage of wallclock time spent in those compound stall
+ * states gives pressure numbers between 0 and 100 for each resource,
+ * where the SOME percentage indicates workload slowdowns and the FULL
+ * percentage indicates reduced CPU utilization:
+ *
+ *	%SOME = time(SOME) / period
+ *	%FULL = time(FULL) / period
+ *
+ *			Multiple CPUs
+ *
+ * The more tasks and available CPUs there are, the more work can be
+ * performed concurrently. This means that the potential that can go
+ * unrealized due to resource contention *also* scales with non-idle
+ * tasks and CPUs.
+ *
+ * Consider a scenario where 257 number crunching tasks are trying to
+ * run concurrently on 256 CPUs. If we simply aggregated the task
+ * states, we would have to conclude a CPU SOME pressure number of
+ * 100%, since *somebody* is waiting on a runqueue at all
+ * times. However, that is clearly not the amount of contention the
+ * workload is experiencing: only one out of 256 possible execution
+ * threads will be contended at any given time, or about 0.4%.
+ *
+ * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
+ * given time *one* of the tasks is delayed due to a lack of memory.
+ * Again, looking purely at the task state would yield a memory FULL
+ * pressure number of 0%, since *somebody* is always making forward
+ * progress. But again this wouldn't capture the amount of execution
+ * potential lost, which is 1 out of 4 CPUs, or 25%.
+ *
+ * To calculate wasted potential (pressure) with multiple processors,
+ * we have to base our calculation on the number of non-idle tasks in
+ * conjunction with the number of available CPUs, which is the number
+ * of potential execution threads. SOME becomes then the proportion of
+ * delayed tasks to possible threads, and FULL is the share of possible
+ * threads that are unproductive due to delays:
+ *
+ *	threads = min(nr_nonidle_tasks, nr_cpus)
+ *	   SOME = min(nr_delayed_tasks / threads, 1)
+ *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
+ *
+ * For the 257 number crunchers on 256 CPUs, this yields:
+ *
+ *	threads = min(257, 256)
+ *	   SOME = min(1 / 256, 1)             = 0.4%
+ *	   FULL = (256 - min(257, 256)) / 256 = 0%
+ *
+ * For the 1 out of 4 memory-delayed tasks, this yields:
+ *
+ *	threads = min(4, 4)
+ *	   SOME = min(1 / 4, 1)               = 25%
+ *	   FULL = (4 - min(3, 4)) / 4         = 25%
+ *
+ * [ Substitute nr_cpus with 1, and you can see that it's a natural
+ *   extension of the single-CPU model. ]
+ *
+ *			Implementation
+ *
+ * To assess the precise time spent in each such state, we would have
+ * to freeze the system on task changes and start/stop the state
+ * clocks accordingly. Obviously that doesn't scale in practice.
+ *
+ * Because the scheduler aims to distribute the compute load evenly
+ * among the available CPUs, we can track task state locally to each
+ * CPU and, at much lower frequency, extrapolate the global state for
+ * the cumulative stall times and the running averages.
+ *
+ * For each runqueue, we track:
+ *
+ *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
+ *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
+ *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
+ *
+ * and then periodically aggregate:
+ *
+ *	tNONIDLE = sum(tNONIDLE[i])
+ *
+ *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
+ *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
+ *
+ *	   %SOME = tSOME / period
+ *	   %FULL = tFULL / period
+ *
+ * This gives us an approximation of pressure that is practical
+ * cost-wise, yet way more sensitive and accurate than periodic
+ * sampling of the aggregate task states would be.
+ */
+
+#include "../workqueue_internal.h"
+#include <linux/sched/loadavg.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/seqlock.h>
+#include <linux/uaccess.h>
+#include <linux/cgroup.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/ctype.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/psi.h>
+#include "sched.h"
+
+static int psi_bug __read_mostly;
+
+DEFINE_STATIC_KEY_FALSE(psi_disabled);
+
+#ifdef CONFIG_PSI_DEFAULT_DISABLED
+static bool psi_enable;
+#else
+static bool psi_enable = true;
+#endif
+static int __init setup_psi(char *str)
+{
+	return kstrtobool(str, &psi_enable) == 0;
+}
+__setup("psi=", setup_psi);
+
+/* Running averages - we need to be higher-res than loadavg */
+#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
+#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
+#define EXP_60s		1981		/* 1/exp(2s/60s) */
+#define EXP_300s	2034		/* 1/exp(2s/300s) */
+
+/* PSI trigger definitions */
+#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
+#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
+#define UPDATES_PER_WINDOW 10	/* 10 updates per window */
+
+/* Sampling period in nanoseconds */
+static u64 psi_period __read_mostly;
+
+/* System-level pressure and stall tracking */
+static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
+static struct psi_group psi_system = {
+	.pcpu = &system_group_pcpu,
+};
+
+static void psi_avgs_work(struct work_struct *work);
+
+static void group_init(struct psi_group *group)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
+	group->avg_next_update = sched_clock() + psi_period;
+	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+	mutex_init(&group->avgs_lock);
+	/* Init trigger-related members */
+	atomic_set(&group->poll_scheduled, 0);
+	mutex_init(&group->trigger_lock);
+	INIT_LIST_HEAD(&group->triggers);
+	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
+	group->poll_states = 0;
+	group->poll_min_period = U32_MAX;
+	memset(group->polling_total, 0, sizeof(group->polling_total));
+	group->polling_next_update = ULLONG_MAX;
+	group->polling_until = 0;
+	rcu_assign_pointer(group->poll_kworker, NULL);
+}
+
+void __init psi_init(void)
+{
+	if (!psi_enable) {
+		static_branch_enable(&psi_disabled);
+		return;
+	}
+
+	psi_period = jiffies_to_nsecs(PSI_FREQ);
+	group_init(&psi_system);
+}
+
+static bool test_state(unsigned int *tasks, enum psi_states state)
+{
+	switch (state) {
+	case PSI_IO_SOME:
+		return tasks[NR_IOWAIT];
+	case PSI_IO_FULL:
+		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
+	case PSI_MEM_SOME:
+		return tasks[NR_MEMSTALL];
+	case PSI_MEM_FULL:
+		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
+	case PSI_CPU_SOME:
+		return tasks[NR_RUNNING] > 1;
+	case PSI_NONIDLE:
+		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
+			tasks[NR_RUNNING];
+	default:
+		return false;
+	}
+}
+
+static void get_recent_times(struct psi_group *group, int cpu,
+			     enum psi_aggregators aggregator, u32 *times,
+			     u32 *pchanged_states)
+{
+	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
+	u64 now, state_start;
+	enum psi_states s;
+	unsigned int seq;
+	u32 state_mask;
+
+	*pchanged_states = 0;
+
+	/* Snapshot a coherent view of the CPU state */
+	do {
+		seq = read_seqcount_begin(&groupc->seq);
+		now = cpu_clock(cpu);
+		memcpy(times, groupc->times, sizeof(groupc->times));
+		state_mask = groupc->state_mask;
+		state_start = groupc->state_start;
+	} while (read_seqcount_retry(&groupc->seq, seq));
+
+	/* Calculate state time deltas against the previous snapshot */
+	for (s = 0; s < NR_PSI_STATES; s++) {
+		u32 delta;
+		/*
+		 * In addition to already concluded states, we also
+		 * incorporate currently active states on the CPU,
+		 * since states may last for many sampling periods.
+		 *
+		 * This way we keep our delta sampling buckets small
+		 * (u32) and our reported pressure close to what's
+		 * actually happening.
+		 */
+		if (state_mask & (1 << s))
+			times[s] += now - state_start;
+
+		delta = times[s] - groupc->times_prev[aggregator][s];
+		groupc->times_prev[aggregator][s] = times[s];
+
+		times[s] = delta;
+		if (delta)
+			*pchanged_states |= (1 << s);
+	}
+}
+
+static void calc_avgs(unsigned long avg[3], int missed_periods,
+		      u64 time, u64 period)
+{
+	unsigned long pct;
+
+	/* Fill in zeroes for periods of no activity */
+	if (missed_periods) {
+		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
+		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
+		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
+	}
+
+	/* Sample the most recent active period */
+	pct = div_u64(time * 100, period);
+	pct *= FIXED_1;
+	avg[0] = calc_load(avg[0], EXP_10s, pct);
+	avg[1] = calc_load(avg[1], EXP_60s, pct);
+	avg[2] = calc_load(avg[2], EXP_300s, pct);
+}
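+
+/*
+ * Worked example (illustrative numbers, not from the patch): a CPU that
+ * spent 1s of the last 2s period in a stall state yields pct = 50 * FIXED_1.
+ * With EXP_10s = 1677, avg10 keeps 1677/2048 (~82%) of its old value and
+ * takes ~18% of the new 50% sample, so it converges on 50.00 over a few
+ * ticks.
+ */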
+
+static void collect_percpu_times(struct psi_group *group,
+				 enum psi_aggregators aggregator,
+				 u32 *pchanged_states)
+{
+	u64 deltas[NR_PSI_STATES - 1] = { 0, };
+	unsigned long nonidle_total = 0;
+	u32 changed_states = 0;
+	int cpu;
+	int s;
+
+	/*
+	 * Collect the per-cpu time buckets and average them into a
+	 * single time sample that is normalized to wallclock time.
+	 *
+	 * For averaging, each CPU is weighted by its non-idle time in
+	 * the sampling period. This eliminates artifacts from uneven
+	 * loading, or even entirely idle CPUs.
+	 */
+	for_each_possible_cpu(cpu) {
+		u32 times[NR_PSI_STATES];
+		u32 nonidle;
+		u32 cpu_changed_states;
+
+		get_recent_times(group, cpu, aggregator, times,
+				&cpu_changed_states);
+		changed_states |= cpu_changed_states;
+
+		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
+		nonidle_total += nonidle;
+
+		for (s = 0; s < PSI_NONIDLE; s++)
+			deltas[s] += (u64)times[s] * nonidle;
+	}
+
+	/*
+	 * Integrate the sample into the running statistics that are
+	 * reported to userspace: the cumulative stall times and the
+	 * decaying averages.
+	 *
+	 * Pressure percentages are sampled at PSI_FREQ. We might be
+	 * called more often when the user polls more frequently than
+	 * that; we might be called less often when there is no task
+	 * activity, thus no data, and clock ticks are sporadic. The
+	 * below handles both.
+	 */
+
+	/* total= */
+	for (s = 0; s < NR_PSI_STATES - 1; s++)
+		group->total[aggregator][s] +=
+				div_u64(deltas[s], max(nonidle_total, 1UL));
+
+	if (pchanged_states)
+		*pchanged_states = changed_states;
+}
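+
+/*
+ * Weighting example (assumed numbers): CPU0 is nonidle for the whole period
+ * and reports 500ms of SOME time; CPU1 is nonidle for a tenth of it and
+ * reports 50ms.  The aggregate is (500ms * 1.0 + 50ms * 0.1) / (1.0 + 0.1)
+ * ~= 459ms, so the mostly-idle CPU barely dilutes the pressure signal.
+ */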
+
+static u64 update_averages(struct psi_group *group, u64 now)
+{
+	unsigned long missed_periods = 0;
+	u64 expires, period;
+	u64 avg_next_update;
+	int s;
+
+	/* avgX= */
+	expires = group->avg_next_update;
+	if (now - expires >= psi_period)
+		missed_periods = div_u64(now - expires, psi_period);
+
+	/*
+	 * The periodic clock tick can get delayed for various
+	 * reasons, especially on loaded systems. To avoid clock
+	 * drift, we schedule the clock in fixed psi_period intervals.
+	 * But the deltas we sample out of the per-cpu buckets above
+	 * are based on the actual time elapsing between clock ticks.
+	 */
+	avg_next_update = expires + ((1 + missed_periods) * psi_period);
+	period = now - (group->avg_last_update + (missed_periods * psi_period));
+	group->avg_last_update = now;
+
+	for (s = 0; s < NR_PSI_STATES - 1; s++) {
+		u32 sample;
+
+		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
+		/*
+		 * Due to the lockless sampling of the time buckets,
+		 * recorded time deltas can slip into the next period,
+		 * which under full pressure can result in samples in
+		 * excess of the period length.
+		 *
+		 * We don't want to report nonsensical pressures in
+		 * excess of 100%, nor do we want to drop such events
+		 * on the floor. Instead we punt any overage into the
+		 * future until pressure subsides. By doing this we
+		 * don't underreport the occurring pressure curve, we
+		 * just report it delayed by one period length.
+		 *
+		 * The error isn't cumulative. As soon as another
+		 * delta slips from a period P to P+1, by definition
+		 * it frees up its time T in P.
+		 */
+		if (sample > period)
+			sample = period;
+		group->avg_total[s] += sample;
+		calc_avgs(group->avg[s], missed_periods, sample, period);
+	}
+
+	return avg_next_update;
+}
+
+static void psi_avgs_work(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct psi_group *group;
+	u32 changed_states;
+	bool nonidle;
+	u64 now;
+
+	dwork = to_delayed_work(work);
+	group = container_of(dwork, struct psi_group, avgs_work);
+
+	mutex_lock(&group->avgs_lock);
+
+	now = sched_clock();
+
+	collect_percpu_times(group, PSI_AVGS, &changed_states);
+	nonidle = changed_states & (1 << PSI_NONIDLE);
+	/*
+	 * If there is task activity, periodically fold the per-cpu
+	 * times and feed samples into the running averages. If things
+	 * are idle and there is no data to process, stop the clock.
+	 * Once restarted, we'll catch up the running averages in one
+	 * go - see calc_avgs() and missed_periods.
+	 */
+	if (now >= group->avg_next_update)
+		group->avg_next_update = update_averages(group, now);
+
+	if (nonidle) {
+		schedule_delayed_work(dwork, nsecs_to_jiffies(
+				group->avg_next_update - now) + 1);
+	}
+
+	mutex_unlock(&group->avgs_lock);
+}
+
+/* Trigger tracking window manipulations */
+static void window_reset(struct psi_window *win, u64 now, u64 value,
+			 u64 prev_growth)
+{
+	win->start_time = now;
+	win->start_value = value;
+	win->prev_growth = prev_growth;
+}
+
+/*
+ * PSI growth tracking window update and growth calculation routine.
+ *
+ * This approximates a sliding tracking window by interpolating
+ * partially elapsed windows using historical growth data from the
+ * previous intervals. This minimizes memory requirements (by not storing
+ * all the intermediate values in the previous window) and simplifies
+ * the calculations. It works well because the PSI signal changes only in
+ * the positive direction and, over relatively small window sizes, the
+ * growth is close to linear.
+ */
+static u64 window_update(struct psi_window *win, u64 now, u64 value)
+{
+	u64 elapsed;
+	u64 growth;
+
+	elapsed = now - win->start_time;
+	growth = value - win->start_value;
+	/*
+	 * After each tracking window passes win->start_value and
+	 * win->start_time get reset and win->prev_growth stores
+	 * the average per-window growth of the previous window.
+	 * win->prev_growth is then used to interpolate additional
+	 * growth from the previous window assuming it was linear.
+	 */
+	if (elapsed > win->size)
+		window_reset(win, now, value, growth);
+	else {
+		u32 remaining;
+
+		remaining = win->size - elapsed;
+		growth += div_u64(win->prev_growth * remaining, win->size);
+	}
+
+	return growth;
+}
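+
+/*
+ * Interpolation example (illustrative): with a 1s window that is 750ms
+ * elapsed, the reported growth is the stall accrued during those 750ms plus
+ * remaining/size = 25% of the previous window's growth -- an approximation
+ * of a true 1s sliding window under the linearity assumption above.
+ */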
+
+static void init_triggers(struct psi_group *group, u64 now)
+{
+	struct psi_trigger *t;
+
+	list_for_each_entry(t, &group->triggers, node)
+		window_reset(&t->win, now,
+				group->total[PSI_POLL][t->state], 0);
+	memcpy(group->polling_total, group->total[PSI_POLL],
+		   sizeof(group->polling_total));
+	group->polling_next_update = now + group->poll_min_period;
+}
+
+static u64 update_triggers(struct psi_group *group, u64 now)
+{
+	struct psi_trigger *t;
+	bool new_stall = false;
+	u64 *total = group->total[PSI_POLL];
+
+	/*
+	 * On subsequent updates, calculate growth deltas and let
+	 * watchers know when their specified thresholds are exceeded.
+	 */
+	list_for_each_entry(t, &group->triggers, node) {
+		u64 growth;
+
+		/* Check for stall activity */
+		if (group->polling_total[t->state] == total[t->state])
+			continue;
+
+		/*
+		 * Multiple triggers might be looking at the same state,
+		 * remember to update group->polling_total[] once we've
+		 * been through all of them. Also remember to extend the
+		 * polling time if we see new stall activity.
+		 */
+		new_stall = true;
+
+		/* Calculate growth since last update */
+		growth = window_update(&t->win, now, total[t->state]);
+		if (growth < t->threshold)
+			continue;
+
+		/* Limit event signaling to once per window */
+		if (now < t->last_event_time + t->win.size)
+			continue;
+
+		/* Generate an event */
+		if (cmpxchg(&t->event, 0, 1) == 0)
+			wake_up_interruptible(&t->event_wait);
+		t->last_event_time = now;
+	}
+
+	if (new_stall)
+		memcpy(group->polling_total, total,
+				sizeof(group->polling_total));
+
+	return now + group->poll_min_period;
+}
+
+/*
+ * Schedule polling if it's not already scheduled. This is safe to call
+ * even from the hotpath: although kthread_queue_delayed_work() takes the
+ * worker->lock spinlock, that lock is never contended because the
+ * poll_scheduled atomic prevents such competition.
+ */
+static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
+{
+	struct kthread_worker *kworker;
+
+	/* Do not reschedule if already scheduled */
+	if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
+		return;
+
+	rcu_read_lock();
+
+	kworker = rcu_dereference(group->poll_kworker);
+	/*
+	 * kworker might be NULL if psi_trigger_destroy() races with
+	 * psi_task_change() (the hotpath), which cannot take locks
+	 */
+	if (likely(kworker))
+		kthread_queue_delayed_work(kworker, &group->poll_work, delay);
+	else
+		atomic_set(&group->poll_scheduled, 0);
+
+	rcu_read_unlock();
+}
+
+static void psi_poll_work(struct kthread_work *work)
+{
+	struct kthread_delayed_work *dwork;
+	struct psi_group *group;
+	u32 changed_states;
+	u64 now;
+
+	dwork = container_of(work, struct kthread_delayed_work, work);
+	group = container_of(dwork, struct psi_group, poll_work);
+
+	atomic_set(&group->poll_scheduled, 0);
+
+	mutex_lock(&group->trigger_lock);
+
+	now = sched_clock();
+
+	collect_percpu_times(group, PSI_POLL, &changed_states);
+
+	if (changed_states & group->poll_states) {
+		/* Initialize trigger windows when entering polling mode */
+		if (now > group->polling_until)
+			init_triggers(group, now);
+
+		/*
+		 * Keep the monitor active for at least the duration of the
+		 * minimum tracking window as long as monitor states are
+		 * changing.
+		 */
+		group->polling_until = now +
+			group->poll_min_period * UPDATES_PER_WINDOW;
+	}
+
+	if (now > group->polling_until) {
+		group->polling_next_update = ULLONG_MAX;
+		goto out;
+	}
+
+	if (now >= group->polling_next_update)
+		group->polling_next_update = update_triggers(group, now);
+
+	psi_schedule_poll_work(group,
+		nsecs_to_jiffies(group->polling_next_update - now) + 1);
+
+out:
+	mutex_unlock(&group->trigger_lock);
+}
+
+static void record_times(struct psi_group_cpu *groupc, int cpu,
+			 bool memstall_tick)
+{
+	u32 delta;
+	u64 now;
+
+	now = cpu_clock(cpu);
+	delta = now - groupc->state_start;
+	groupc->state_start = now;
+
+	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
+		groupc->times[PSI_IO_SOME] += delta;
+		if (groupc->state_mask & (1 << PSI_IO_FULL))
+			groupc->times[PSI_IO_FULL] += delta;
+	}
+
+	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
+		groupc->times[PSI_MEM_SOME] += delta;
+		if (groupc->state_mask & (1 << PSI_MEM_FULL))
+			groupc->times[PSI_MEM_FULL] += delta;
+		else if (memstall_tick) {
+			u32 sample;
+			/*
+			 * Since we care about lost potential, a
+			 * memstall is FULL when there are no other
+			 * working tasks, but also when the CPU is
+			 * actively reclaiming and nothing productive
+			 * could run even if it were runnable.
+			 *
+			 * When the timer tick sees a reclaiming CPU,
+			 * regardless of runnable tasks, sample a FULL
+			 * tick (or less if it hasn't been a full tick
+			 * since the last state change).
+			 */
+			sample = min(delta, (u32)jiffies_to_nsecs(1));
+			groupc->times[PSI_MEM_FULL] += sample;
+		}
+	}
+
+	if (groupc->state_mask & (1 << PSI_CPU_SOME))
+		groupc->times[PSI_CPU_SOME] += delta;
+
+	if (groupc->state_mask & (1 << PSI_NONIDLE))
+		groupc->times[PSI_NONIDLE] += delta;
+}
+
+static u32 psi_group_change(struct psi_group *group, int cpu,
+			    unsigned int clear, unsigned int set)
+{
+	struct psi_group_cpu *groupc;
+	unsigned int t, m;
+	enum psi_states s;
+	u32 state_mask = 0;
+
+	groupc = per_cpu_ptr(group->pcpu, cpu);
+
+	/*
+	 * First we assess the aggregate resource states this CPU's
+	 * tasks have been in since the last change, and account any
+	 * SOME and FULL time these may have resulted in.
+	 *
+	 * Then we update the task counts according to the state
+	 * change requested through the @clear and @set bits.
+	 */
+	write_seqcount_begin(&groupc->seq);
+
+	record_times(groupc, cpu, false);
+
+	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
+		if (!(m & (1 << t)))
+			continue;
+		if (groupc->tasks[t] == 0 && !psi_bug) {
+			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
+					cpu, t, groupc->tasks[0],
+					groupc->tasks[1], groupc->tasks[2],
+					clear, set);
+			psi_bug = 1;
+		}
+		groupc->tasks[t]--;
+	}
+
+	for (t = 0; set; set &= ~(1 << t), t++)
+		if (set & (1 << t))
+			groupc->tasks[t]++;
+
+	/* Calculate state mask representing active states */
+	for (s = 0; s < NR_PSI_STATES; s++) {
+		if (test_state(groupc->tasks, s))
+			state_mask |= (1 << s);
+	}
+	groupc->state_mask = state_mask;
+
+	write_seqcount_end(&groupc->seq);
+
+	return state_mask;
+}
+
+static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
+{
+#ifdef CONFIG_CGROUPS
+	struct cgroup *cgroup = NULL;
+
+	if (!*iter)
+		cgroup = task->cgroups->dfl_cgrp;
+	else if (*iter == &psi_system)
+		return NULL;
+	else
+		cgroup = cgroup_parent(*iter);
+
+	if (cgroup && cgroup_parent(cgroup)) {
+		*iter = cgroup;
+		return cgroup_psi(cgroup);
+	}
+#else
+	if (*iter)
+		return NULL;
+#endif
+	*iter = &psi_system;
+	return &psi_system;
+}
+
+void psi_task_change(struct task_struct *task, int clear, int set)
+{
+	int cpu = task_cpu(task);
+	struct psi_group *group;
+	bool wake_clock = true;
+	void *iter = NULL;
+
+	if (!task->pid)
+		return;
+
+	if (((task->psi_flags & set) ||
+	     (task->psi_flags & clear) != clear) &&
+	    !psi_bug) {
+		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
+				task->pid, task->comm, cpu,
+				task->psi_flags, clear, set);
+		psi_bug = 1;
+	}
+
+	task->psi_flags &= ~clear;
+	task->psi_flags |= set;
+
+	/*
+	 * Periodic aggregation shuts off if there is a period of no
+	 * task changes, so we wake it back up if necessary. However,
+	 * don't do this if the task change is the aggregation worker
+	 * itself going to sleep, or we'll ping-pong forever.
+	 */
+	if (unlikely((clear & TSK_RUNNING) &&
+		     (task->flags & PF_WQ_WORKER) &&
+		     wq_worker_last_func(task) == psi_avgs_work))
+		wake_clock = false;
+
+	while ((group = iterate_groups(task, &iter))) {
+		u32 state_mask = psi_group_change(group, cpu, clear, set);
+
+		if (state_mask & group->poll_states)
+			psi_schedule_poll_work(group, 1);
+
+		if (wake_clock && !delayed_work_pending(&group->avgs_work))
+			schedule_delayed_work(&group->avgs_work, PSI_FREQ);
+	}
+}
+
+void psi_memstall_tick(struct task_struct *task, int cpu)
+{
+	struct psi_group *group;
+	void *iter = NULL;
+
+	while ((group = iterate_groups(task, &iter))) {
+		struct psi_group_cpu *groupc;
+
+		groupc = per_cpu_ptr(group->pcpu, cpu);
+		write_seqcount_begin(&groupc->seq);
+		record_times(groupc, cpu, true);
+		write_seqcount_end(&groupc->seq);
+	}
+}
+
+/**
+ * psi_memstall_enter - mark the beginning of a memory stall section
+ * @flags: flags to handle nested sections
+ *
+ * Marks the calling task as being stalled due to a lack of memory,
+ * such as waiting for a refault or performing reclaim.
+ */
+void psi_memstall_enter(unsigned long *flags)
+{
+	struct rq_flags rf;
+	struct rq *rq;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	*flags = current->flags & PF_MEMSTALL;
+	if (*flags)
+		return;
+	/*
+	 * PF_MEMSTALL setting & accounting needs to be atomic wrt
+	 * changes to the task's scheduling state, otherwise we can
+	 * race with CPU migration.
+	 */
+	rq = this_rq_lock_irq(&rf);
+
+	current->flags |= PF_MEMSTALL;
+	psi_task_change(current, 0, TSK_MEMSTALL);
+
+	rq_unlock_irq(rq, &rf);
+}
+
+/**
+ * psi_memstall_leave - mark the end of a memory stall section
+ * @flags: flags to handle nested memdelay sections
+ *
+ * Marks the calling task as no longer stalled due to lack of memory.
+ */
+void psi_memstall_leave(unsigned long *flags)
+{
+	struct rq_flags rf;
+	struct rq *rq;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	if (*flags)
+		return;
+	/*
+	 * PF_MEMSTALL clearing & accounting needs to be atomic wrt
+	 * changes to the task's scheduling state, otherwise we could
+	 * race with CPU migration.
+	 */
+	rq = this_rq_lock_irq(&rf);
+
+	current->flags &= ~PF_MEMSTALL;
+	psi_task_change(current, TSK_MEMSTALL, 0);
+
+	rq_unlock_irq(rq, &rf);
+}
+
+#ifdef CONFIG_CGROUPS
+int psi_cgroup_alloc(struct cgroup *cgroup)
+{
+	if (static_branch_likely(&psi_disabled))
+		return 0;
+
+	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
+	if (!cgroup->psi.pcpu)
+		return -ENOMEM;
+	group_init(&cgroup->psi);
+	return 0;
+}
+
+void psi_cgroup_free(struct cgroup *cgroup)
+{
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
+	free_percpu(cgroup->psi.pcpu);
+	/* All triggers must be removed by now */
+	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
+}
+
+/**
+ * cgroup_move_task - move task to a different cgroup
+ * @task: the task
+ * @to: the target css_set
+ *
+ * Move task to a new cgroup and safely migrate its associated stall
+ * state between the different groups.
+ *
+ * This function acquires the task's rq lock to lock out concurrent
+ * changes to the task's scheduling state and - in case the task is
+ * running - concurrent changes to its stall state.
+ */
+void cgroup_move_task(struct task_struct *task, struct css_set *to)
+{
+	unsigned int task_flags = 0;
+	struct rq_flags rf;
+	struct rq *rq;
+
+	if (static_branch_likely(&psi_disabled)) {
+		/*
+		 * Lame to do this here, but the scheduler cannot be locked
+		 * from the outside, so we move cgroups from inside sched/.
+		 */
+		rcu_assign_pointer(task->cgroups, to);
+		return;
+	}
+
+	rq = task_rq_lock(task, &rf);
+
+	if (task_on_rq_queued(task))
+		task_flags = TSK_RUNNING;
+	else if (task->in_iowait)
+		task_flags = TSK_IOWAIT;
+
+	if (task->flags & PF_MEMSTALL)
+		task_flags |= TSK_MEMSTALL;
+
+	if (task_flags)
+		psi_task_change(task, task_flags, 0);
+
+	/* See comment above */
+	rcu_assign_pointer(task->cgroups, to);
+
+	if (task_flags)
+		psi_task_change(task, 0, task_flags);
+
+	task_rq_unlock(rq, task, &rf);
+}
+#endif /* CONFIG_CGROUPS */
+
+int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
+{
+	int full;
+	u64 now;
+
+	if (static_branch_likely(&psi_disabled))
+		return -EOPNOTSUPP;
+
+	/* Update averages before reporting them */
+	mutex_lock(&group->avgs_lock);
+	now = sched_clock();
+	collect_percpu_times(group, PSI_AVGS, NULL);
+	if (now >= group->avg_next_update)
+		group->avg_next_update = update_averages(group, now);
+	mutex_unlock(&group->avgs_lock);
+
+	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
+		unsigned long avg[3];
+		u64 total;
+		int w;
+
+		for (w = 0; w < 3; w++)
+			avg[w] = group->avg[res * 2 + full][w];
+		total = div_u64(group->total[PSI_AVGS][res * 2 + full],
+				NSEC_PER_USEC);
+
+		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
+			   full ? "full" : "some",
+			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
+			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
+			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
+			   total);
+	}
+
+	return 0;
+}
+
+static int psi_io_show(struct seq_file *m, void *v)
+{
+	return psi_show(m, &psi_system, PSI_IO);
+}
+
+static int psi_memory_show(struct seq_file *m, void *v)
+{
+	return psi_show(m, &psi_system, PSI_MEM);
+}
+
+static int psi_cpu_show(struct seq_file *m, void *v)
+{
+	return psi_show(m, &psi_system, PSI_CPU);
+}
+
+static int psi_io_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, psi_io_show, NULL);
+}
+
+static int psi_memory_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, psi_memory_show, NULL);
+}
+
+static int psi_cpu_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, psi_cpu_show, NULL);
+}
+
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+			char *buf, size_t nbytes, enum psi_res res)
+{
+	struct psi_trigger *t;
+	enum psi_states state;
+	u32 threshold_us;
+	u32 window_us;
+
+	if (static_branch_likely(&psi_disabled))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
+		state = PSI_IO_SOME + res * 2;
+	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
+		state = PSI_IO_FULL + res * 2;
+	else
+		return ERR_PTR(-EINVAL);
+
+	if (state >= PSI_NONIDLE)
+		return ERR_PTR(-EINVAL);
+
+	if (window_us < WINDOW_MIN_US ||
+		window_us > WINDOW_MAX_US)
+		return ERR_PTR(-EINVAL);
+
+	/* Check threshold */
+	if (threshold_us == 0 || threshold_us > window_us)
+		return ERR_PTR(-EINVAL);
+
+	t = kmalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		return ERR_PTR(-ENOMEM);
+
+	t->group = group;
+	t->state = state;
+	t->threshold = threshold_us * NSEC_PER_USEC;
+	t->win.size = window_us * NSEC_PER_USEC;
+	window_reset(&t->win, 0, 0, 0);
+
+	t->event = 0;
+	t->last_event_time = 0;
+	init_waitqueue_head(&t->event_wait);
+	kref_init(&t->refcount);
+
+	mutex_lock(&group->trigger_lock);
+
+	if (!rcu_access_pointer(group->poll_kworker)) {
+		struct sched_param param = {
+			.sched_priority = MAX_RT_PRIO - 1,
+		};
+		struct kthread_worker *kworker;
+
+		kworker = kthread_create_worker(0, "psimon");
+		if (IS_ERR(kworker)) {
+			kfree(t);
+			mutex_unlock(&group->trigger_lock);
+			return ERR_CAST(kworker);
+		}
+		sched_setscheduler(kworker->task, SCHED_FIFO, &param);
+		kthread_init_delayed_work(&group->poll_work,
+				psi_poll_work);
+		rcu_assign_pointer(group->poll_kworker, kworker);
+	}
+
+	list_add(&t->node, &group->triggers);
+	group->poll_min_period = min(group->poll_min_period,
+		div_u64(t->win.size, UPDATES_PER_WINDOW));
+	group->nr_triggers[t->state]++;
+	group->poll_states |= (1 << t->state);
+
+	mutex_unlock(&group->trigger_lock);
+
+	return t;
+}
+
+static void psi_trigger_destroy(struct kref *ref)
+{
+	struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
+	struct psi_group *group = t->group;
+	struct kthread_worker *kworker_to_destroy = NULL;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	/*
+	 * Wake up waiters so they stop polling. This can happen if a cgroup
+	 * is deleted out from under a polling process.
+	 */
+	wake_up_interruptible(&t->event_wait);
+
+	mutex_lock(&group->trigger_lock);
+
+	if (!list_empty(&t->node)) {
+		struct psi_trigger *tmp;
+		u64 period = ULLONG_MAX;
+
+		list_del(&t->node);
+		group->nr_triggers[t->state]--;
+		if (!group->nr_triggers[t->state])
+			group->poll_states &= ~(1 << t->state);
+		/* reset min update period for the remaining triggers */
+		list_for_each_entry(tmp, &group->triggers, node)
+			period = min(period, div_u64(tmp->win.size,
+					UPDATES_PER_WINDOW));
+		group->poll_min_period = period;
+		/* Destroy poll_kworker when the last trigger is destroyed */
+		if (group->poll_states == 0) {
+			group->polling_until = 0;
+			kworker_to_destroy = rcu_dereference_protected(
+					group->poll_kworker,
+					lockdep_is_held(&group->trigger_lock));
+			rcu_assign_pointer(group->poll_kworker, NULL);
+		}
+	}
+
+	mutex_unlock(&group->trigger_lock);
+
+	/*
+	 * Wait for the RCU read-side critical sections on both *trigger_ptr
+	 * (from psi_trigger_replace()) and poll_kworker to complete before
+	 * destroying the trigger and, optionally, the poll_kworker.
+	 */
+	synchronize_rcu();
+	/*
+	 * Destroy the kworker after releasing trigger_lock to prevent a
+	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
+	 */
+	if (kworker_to_destroy) {
+		kthread_cancel_delayed_work_sync(&group->poll_work);
+		kthread_destroy_worker(kworker_to_destroy);
+	}
+	kfree(t);
+}
+
+void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
+{
+	struct psi_trigger *old = *trigger_ptr;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	rcu_assign_pointer(*trigger_ptr, new);
+	if (old)
+		kref_put(&old->refcount, psi_trigger_destroy);
+}
+
+unsigned int psi_trigger_poll(void **trigger_ptr, struct file *file,
+			      poll_table *wait)
+{
+	unsigned int ret = DEFAULT_POLLMASK;
+	struct psi_trigger *t;
+
+	if (static_branch_likely(&psi_disabled))
+		return DEFAULT_POLLMASK | POLLERR | POLLPRI;
+
+	rcu_read_lock();
+
+	t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
+	if (!t) {
+		rcu_read_unlock();
+		return DEFAULT_POLLMASK | POLLERR | POLLPRI;
+	}
+	kref_get(&t->refcount);
+
+	rcu_read_unlock();
+
+	poll_wait(file, &t->event_wait, wait);
+
+	if (cmpxchg(&t->event, 1, 0) == 1)
+		ret |= POLLPRI;
+
+	kref_put(&t->refcount, psi_trigger_destroy);
+
+	return ret;
+}
+
+static ssize_t psi_write(struct file *file, const char __user *user_buf,
+			 size_t nbytes, enum psi_res res)
+{
+	char buf[32];
+	size_t buf_size;
+	struct seq_file *seq;
+	struct psi_trigger *new;
+
+	if (static_branch_likely(&psi_disabled))
+		return -EOPNOTSUPP;
+
+	buf_size = min(nbytes, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+
+	new = psi_trigger_create(&psi_system, buf, nbytes, res);
+	if (IS_ERR(new))
+		return PTR_ERR(new);
+
+	seq = file->private_data;
+	/* Take seq->lock to protect seq->private from concurrent writes */
+	mutex_lock(&seq->lock);
+	psi_trigger_replace(&seq->private, new);
+	mutex_unlock(&seq->lock);
+
+	return nbytes;
+}
+
+static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
+			    size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_IO);
+}
+
+static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
+				size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_MEM);
+}
+
+static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
+			     size_t nbytes, loff_t *ppos)
+{
+	return psi_write(file, user_buf, nbytes, PSI_CPU);
+}
+
+static unsigned int psi_fop_poll(struct file *file, poll_table *wait)
+{
+	struct seq_file *seq = file->private_data;
+
+	return psi_trigger_poll(&seq->private, file, wait);
+}
+
+static int psi_fop_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+
+	psi_trigger_replace(&seq->private, NULL);
+	return single_release(inode, file);
+}
+
+static const struct file_operations psi_io_fops = {
+	.open           = psi_io_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.write          = psi_io_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
+};
+
+static const struct file_operations psi_memory_fops = {
+	.open           = psi_memory_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.write          = psi_memory_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
+};
+
+static const struct file_operations psi_cpu_fops = {
+	.open           = psi_cpu_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.write          = psi_cpu_write,
+	.poll           = psi_fop_poll,
+	.release        = psi_fop_release,
+};
+
+static int __init psi_proc_init(void)
+{
+	proc_mkdir("pressure", NULL);
+	proc_create("pressure/io", 0, NULL, &psi_io_fops);
+	proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
+	proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
+	return 0;
+}
+module_init(psi_proc_init);
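
For reference, the trigger ABI accepted by psi_write() above is a single line of the form "<some|full> <stall threshold in us> <time window in us>" written to one of the /proc/pressure files; the writer then polls the same fd for POLLPRI. A minimal userspace sketch (the 150ms/1s numbers are arbitrary):

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* wake up when "some" memory stall exceeds 150ms in any 1s window */
	const char trig[] = "some 150000 1000000";
	struct pollfd fds;
	int n;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0)
		return 1;
	/* include the NUL so the kernel-side buffer is cleanly terminated */
	if (write(fds.fd, trig, strlen(trig) + 1) < 0)
		return 1;
	fds.events = POLLPRI;

	for (;;) {
		n = poll(&fds, 1, -1);
		if (n < 0) {
			if (errno == EINTR)
				continue;
			break;
		}
		if (fds.revents & POLLERR)
			break;	/* trigger went away, e.g. psi disabled */
		if (fds.revents & POLLPRI)
			printf("memory pressure event\n");
	}
	close(fds.fd);
	return 0;
}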
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 95b68bd..1d704a4 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1625,7 +1625,7 @@
 }
 
 static struct task_struct *
-pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
@@ -1637,9 +1637,9 @@
 		 * disabled avoiding further scheduler activity on it and we're
 		 * being very careful to re-start the picking loop.
 		 */
-		lockdep_unpin_lock(&rq->lock, cookie);
+		rq_unpin_lock(rq, rf);
 		pull_rt_task(rq);
-		lockdep_repin_lock(&rq->lock, cookie);
+		rq_repin_lock(rq, rf);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a dl or stop task can slip in, in which case we need
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9942e02..8b351c86 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,11 +2,13 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/smt.h>
 #include <linux/u64_stats_sync.h>
 #include <linux/sched/deadline.h>
 #include <linux/kernel_stat.h>
 #include <linux/binfmts.h>
 #include <linux/mutex.h>
+#include <linux/psi.h>
 #include <linux/spinlock.h>
 #include <linux/stop_machine.h>
 #include <linux/irq_work.h>
@@ -303,6 +305,7 @@
 #ifdef CONFIG_CGROUP_SCHED
 
 #include <linux/cgroup.h>
+#include <linux/psi.h>
 
 struct cfs_rq;
 struct rt_rq;
@@ -901,6 +904,8 @@
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		raw_cpu_ptr(&runqueues)
 
+extern void update_rq_clock(struct rq *rq);
+
 static inline u64 __rq_clock_broken(struct rq *rq)
 {
 	return READ_ONCE(rq->clock);
@@ -930,6 +935,118 @@
 		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
 }
 
+struct rq_flags {
+	unsigned long flags;
+	struct pin_cookie cookie;
+};
+
+static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
+{
+	rf->cookie = lockdep_pin_lock(&rq->lock);
+}
+
+static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
+{
+	lockdep_unpin_lock(&rq->lock, rf->cookie);
+}
+
+static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
+{
+	lockdep_repin_lock(&rq->lock, rf->cookie);
+}
+
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(rq->lock);
+
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock);
+
+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+}
+
+static inline void
+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irqsave(&rq->lock, rf->flags);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irq(&rq->lock);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(&rq->lock);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_relock(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(&rq->lock);
+	rq_repin_lock(rq, rf);
+}
+
+static inline void
+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
+}
+
+static inline void
+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock_irq(&rq->lock);
+}
+
+static inline void
+rq_unlock(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline struct rq *
+this_rq_lock_irq(struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	local_irq_disable();
+	rq = this_rq();
+	rq_lock(rq, rf);
+	return rq;
+}
+
 #ifdef CONFIG_NUMA
 enum numa_topology_type {
 	NUMA_DIRECT,
@@ -1404,7 +1521,7 @@
 	 */
 	struct task_struct * (*pick_next_task) (struct rq *rq,
 						struct task_struct *prev,
-						struct pin_cookie cookie);
+						struct rq_flags *rf);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
@@ -1656,8 +1773,6 @@
 #endif
 }
 
-extern void update_rq_clock(struct rq *rq);
-
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
 
@@ -1953,34 +2068,6 @@
 static inline void sched_avg_update(struct rq *rq) { }
 #endif
 
-struct rq_flags {
-	unsigned long flags;
-	struct pin_cookie cookie;
-};
-
-struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-	__acquires(rq->lock);
-struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock);
-
-static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	lockdep_unpin_lock(&rq->lock, rf->cookie);
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	lockdep_unpin_lock(&rq->lock, rf->cookie);
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-}
-
 extern struct rq *lock_rq_of(struct task_struct *p, struct rq_flags *flags);
 extern void unlock_rq_of(struct rq *rq, struct task_struct *p, struct rq_flags *flags);
 
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 34659a8..3c0d8f1 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -54,6 +54,92 @@
 #define schedstat_val_or_zero(var)	0
 #endif /* CONFIG_SCHEDSTATS */
 
+#ifdef CONFIG_PSI
+/*
+ * PSI tracks state that persists across sleeps, such as iowaits and
+ * memory stalls. As a result, it has to distinguish between sleeps,
+ * where a task's runnable state changes, and requeues, where a task
+ * and its state are being moved between CPUs and runqueues.
+ */
+static inline void psi_enqueue(struct task_struct *p, bool wakeup)
+{
+	int clear = 0, set = TSK_RUNNING;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	if (!wakeup || p->sched_psi_wake_requeue) {
+		if (p->flags & PF_MEMSTALL)
+			set |= TSK_MEMSTALL;
+		if (p->sched_psi_wake_requeue)
+			p->sched_psi_wake_requeue = 0;
+	} else {
+		if (p->in_iowait)
+			clear |= TSK_IOWAIT;
+	}
+
+	psi_task_change(p, clear, set);
+}
+
+static inline void psi_dequeue(struct task_struct *p, bool sleep)
+{
+	int clear = TSK_RUNNING, set = 0;
+
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	if (!sleep) {
+		if (p->flags & PF_MEMSTALL)
+			clear |= TSK_MEMSTALL;
+	} else {
+		if (p->in_iowait)
+			set |= TSK_IOWAIT;
+	}
+
+	psi_task_change(p, clear, set);
+}
+
+static inline void psi_ttwu_dequeue(struct task_struct *p)
+{
+	if (static_branch_likely(&psi_disabled))
+		return;
+	/*
+	 * Is the task being migrated during a wakeup? Make sure to
+	 * deregister its sleep-persistent psi states from the old
+	 * queue, and let psi_enqueue() know it has to requeue.
+	 */
+	if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) {
+		struct rq_flags rf;
+		struct rq *rq;
+		int clear = 0;
+
+		if (p->in_iowait)
+			clear |= TSK_IOWAIT;
+		if (p->flags & PF_MEMSTALL)
+			clear |= TSK_MEMSTALL;
+
+		rq = __task_rq_lock(p, &rf);
+		psi_task_change(p, clear, 0);
+		p->sched_psi_wake_requeue = 1;
+		__task_rq_unlock(rq, &rf);
+	}
+}
+
+static inline void psi_task_tick(struct rq *rq)
+{
+	if (static_branch_likely(&psi_disabled))
+		return;
+
+	if (unlikely(rq->curr->flags & PF_MEMSTALL))
+		psi_memstall_tick(rq->curr, cpu_of(rq));
+}
+#else /* CONFIG_PSI */
+static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
+static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
+static inline void psi_ttwu_dequeue(struct task_struct *p) {}
+static inline void psi_task_tick(struct rq *rq) {}
+#endif /* CONFIG_PSI */
+
 #ifdef CONFIG_SCHED_INFO
 static inline void sched_info_reset_dequeued(struct task_struct *t)
 {
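
The psi_enqueue()/psi_dequeue() hooks above are intended to be called from the scheduler's enqueue/dequeue paths; a sketch of the expected wiring in kernel/sched/core.c (hypothetical excerpt, the actual core.c hunk lives elsewhere in this series):

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	psi_enqueue(p, flags & ENQUEUE_WAKEUP);	/* task becomes runnable */
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	psi_dequeue(p, flags & DEQUEUE_SLEEP);	/* sleep vs. migration/requeue */
	p->sched_class->dequeue_task(rq, p, flags);
}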
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 11a1888..a6ce92c 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -25,7 +25,7 @@
 }
 
 static struct task_struct *
-pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
+pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *stop = rq->stop;
 
diff --git a/kernel/sys.c b/kernel/sys.c
index 60e7ee0..181a7f0 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -51,6 +51,7 @@
 #include <linux/binfmts.h>
 
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/rcupdate.h>
 #include <linux/uidgid.h>
 #include <linux/cred.h>
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b4f1889..a3c6446 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -125,7 +125,9 @@
 static int __maybe_unused two = 2;
 static int __maybe_unused three = 3;
 static int __maybe_unused four = 4;
+static unsigned long zero_ul;
 static unsigned long one_ul = 1;
+static unsigned long long_max = LONG_MAX;
 static int one_hundred = 100;
 static int __maybe_unused one_thousand = 1000;
 #ifdef CONFIG_SCHED_WALT
@@ -1881,6 +1883,8 @@
 		.maxlen		= sizeof(files_stat.max_files),
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
+		.extra1		= &zero_ul,
+		.extra2		= &long_max,
 	},
 	{
 		.procname	= "nr_open",
@@ -2576,7 +2580,16 @@
 {
 	struct do_proc_dointvec_minmax_conv_param *param = data;
 	if (write) {
-		int val = *negp ? -*lvalp : *lvalp;
+		int val;
+		if (*negp) {
+			if (*lvalp > (unsigned long) INT_MAX + 1)
+				return -EINVAL;
+			val = -*lvalp;
+		} else {
+			if (*lvalp > (unsigned long) INT_MAX)
+				return -EINVAL;
+			val = *lvalp;
+		}
 		if ((param->min && *param->min > val) ||
 		    (param->max && *param->max < val))
 			return -EINVAL;
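
The added checks close a truncation hole: do_proc_dointvec_minmax_conv() funnels an unsigned long through an int, so a value just above INT_MAX used to wrap before the min/max validation ever saw it. A standalone illustration of the failure mode (hypothetical userspace C, 64-bit long assumed):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long lval = (unsigned long)INT_MAX + 2;
	int val = lval;	/* implementation-defined wrap, typically INT_MIN + 1 */

	/* before the fix, 'val' was range-checked instead of 'lval' */
	printf("user wrote %lu, kernel would have validated %d\n", lval, val);
	return 0;
}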
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 9d4c257..86e8969 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/syscore_ops.h>
 #include <linux/clocksource.h>
 #include <linux/jiffies.h>
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ae6bdf3..618c7de 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -32,6 +32,7 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 #include <linux/rcupdate.h>
+#include <linux/kprobes.h>
 
 #include <trace/events/sched.h>
 
@@ -5247,7 +5248,7 @@
 	tr->ops->func = ftrace_stub;
 }
 
-static inline void
+static nokprobe_inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
@@ -5310,12 +5311,14 @@
 {
 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
+NOKPROBE_SYMBOL(ftrace_ops_list_func);
 #else
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
 			      struct ftrace_ops *op, struct pt_regs *regs)
 {
 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
+NOKPROBE_SYMBOL(ftrace_ops_no_ops);
 #endif
 
 /*
@@ -5345,6 +5348,7 @@
 	preempt_enable_notrace();
 	trace_clear_recursion(bit);
 }
+NOKPROBE_SYMBOL(ftrace_ops_assist_func);
 
 /**
  * ftrace_ops_get_func - get the function a trampoline should call
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f316e90..2cfe11e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -701,7 +701,7 @@
 
 	preempt_disable_notrace();
 	time = rb_time_stamp(buffer);
-	preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return time;
 }
@@ -4037,6 +4037,7 @@
  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
  * @buffer: The ring buffer to read from
  * @cpu: The cpu buffer to iterate over
+ * @flags: gfp flags to use for memory allocation
  *
  * This performs the initial preparations necessary to iterate
  * through the buffer.  Memory is allocated, buffer recording
@@ -4054,7 +4055,7 @@
  * This overall must be paired with ring_buffer_read_finish.
  */
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_iter *iter;
@@ -4062,7 +4063,7 @@
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
-	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+	iter = kmalloc(sizeof(*iter), flags);
 	if (!iter)
 		return NULL;
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c13e83b..7dba55c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -501,8 +501,10 @@
 	 * not modified.
 	 */
 	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
-	if (!pid_list)
+	if (!pid_list) {
+		trace_parser_put(&parser);
 		return -ENOMEM;
+	}
 
 	pid_list->pid_max = READ_ONCE(pid_max);
 
@@ -512,6 +514,7 @@
 
 	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
 	if (!pid_list->pids) {
+		trace_parser_put(&parser);
 		kfree(pid_list);
 		return -ENOMEM;
 	}
@@ -3522,7 +3525,8 @@
 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+				ring_buffer_read_prepare(iter->trace_buffer->buffer,
+							 cpu, GFP_KERNEL);
 		}
 		ring_buffer_read_prepare_sync();
 		for_each_tracing_cpu(cpu) {
@@ -3532,7 +3536,8 @@
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+			ring_buffer_read_prepare(iter->trace_buffer->buffer,
+						 cpu, GFP_KERNEL);
 		ring_buffer_read_prepare_sync();
 		ring_buffer_read_start(iter->buffer_iter[cpu]);
 		tracing_iter_reset(iter, cpu);
@@ -5210,7 +5215,6 @@
 	return ret;
 
 fail:
-	kfree(iter->trace);
 	kfree(iter);
 	__trace_array_put(tr);
 	mutex_unlock(&trace_types_lock);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index b15316a..ffa1a0b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1311,9 +1311,6 @@
 	char buf[32];
 	int len;
 
-	if (*ppos)
-		return 0;
-
 	if (unlikely(!id))
 		return -ENODEV;
 
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 0664044..766e5cc 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -871,9 +871,10 @@
 		/* ensure NULL-termination */
 		if (size > key_field->size - 1)
 			size = key_field->size - 1;
-	}
 
-	memcpy(compound_key + key_field->offset, key, size);
+		strncpy(compound_key + key_field->offset, (char *)key, size);
+	} else
+		memcpy(compound_key + key_field->offset, key, size);
 }
 
 static void event_hist_trigger(struct event_trigger_data *data, void *rec)
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 57149bc..8964582 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -50,14 +50,16 @@
 	if (cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter.buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
+			ring_buffer_read_prepare(iter.trace_buffer->buffer,
+						 cpu, GFP_ATOMIC);
 			ring_buffer_read_start(iter.buffer_iter[cpu]);
 			tracing_iter_reset(&iter, cpu);
 		}
 	} else {
 		iter.cpu_file = cpu_file;
 		iter.buffer_iter[cpu_file] =
-			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
+			ring_buffer_read_prepare(iter.trace_buffer->buffer,
+						 cpu_file, GFP_ATOMIC);
 		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
 		tracing_iter_reset(&iter, cpu_file);
 	}
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 20bee0c..255a824 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -614,7 +614,7 @@
 
 	/* Don't print "0x  (null)" when offset is 0 */
 	if (tu->offset) {
-		seq_printf(m, "0x%p", (void *)tu->offset);
+		seq_printf(m, "0x%px", (void *)tu->offset);
 	} else {
 		switch (sizeof(void *)) {
 		case 4:
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3806a97..e14c99521 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -293,6 +293,8 @@
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
+bool wq_online;				/* can kworkers be created yet? */
+
 static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */
 
 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
@@ -912,6 +914,26 @@
 }
 
 /**
+ * wq_worker_last_func - retrieve worker's last work function
+ * @task: task to retrieve the last work function of
+ *
+ * Determine the last function a worker executed. This is called from
+ * the scheduler to get a worker's last known identity.
+ *
+ * CONTEXT:
+ * spin_lock_irq(rq->lock)
+ *
+ * Return:
+ * The last work function %current executed as a worker, NULL if it
+ * hasn't executed any work yet.
+ */
+work_func_t wq_worker_last_func(struct task_struct *task)
+{
+	struct worker *worker = kthread_data(task);
+
+	return worker->last_func;
+}
+
+/**
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
  * @flags: flags to set
@@ -2130,6 +2152,9 @@
 	if (unlikely(cpu_intensive))
 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
+	/* tag the worker for identification in schedule() */
+	worker->last_func = worker->current_func;
+
 	/* we're done with it, release */
 	hash_del(&worker->hentry);
 	worker->current_work = NULL;
@@ -2586,6 +2611,9 @@
 	};
 	int next_color;
 
+	if (WARN_ON(!wq_online))
+		return;
+
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
 
@@ -2846,6 +2874,9 @@
 {
 	struct wq_barrier barr;
 
+	if (WARN_ON(!wq_online))
+		return false;
+
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
@@ -2916,7 +2947,13 @@
 	mark_work_canceling(work);
 	local_irq_restore(flags);
 
-	flush_work(work);
+	/*
+	 * This allows canceling during early boot.  We know that @work
+	 * isn't executing.
+	 */
+	if (wq_online)
+		flush_work(work);
+
 	clear_work_data(work);
 
 	/*
@@ -3366,7 +3403,7 @@
 		goto fail;
 
 	/* create and start the initial worker */
-	if (!create_worker(pool))
+	if (wq_online && !create_worker(pool))
 		goto fail;
 
 	/* install */
@@ -3431,6 +3468,7 @@
 {
 	struct workqueue_struct *wq = pwq->wq;
 	bool freezable = wq->flags & WQ_FREEZABLE;
+	unsigned long flags;
 
 	/* for @wq->saved_max_active */
 	lockdep_assert_held(&wq->mutex);
@@ -3439,7 +3477,8 @@
 	if (!freezable && pwq->max_active == wq->saved_max_active)
 		return;
 
-	spin_lock_irq(&pwq->pool->lock);
+	/* this function can be called during early boot with irqs disabled */
+	spin_lock_irqsave(&pwq->pool->lock, flags);
 
 	/*
 	 * During [un]freezing, the caller is responsible for ensuring that
@@ -3462,7 +3501,7 @@
 		pwq->max_active = 0;
 	}
 
-	spin_unlock_irq(&pwq->pool->lock);
+	spin_unlock_irqrestore(&pwq->pool->lock, flags);
 }
 
 /* initialize newly alloced @pwq which is associated with @wq and @pool */
@@ -5509,7 +5548,17 @@
 	wq_numa_enabled = true;
 }
 
-static int __init init_workqueues(void)
+/**
+ * workqueue_init_early - early init for workqueue subsystem
+ *
+ * This is the first half of the two-stage workqueue subsystem initialization
+ * and is invoked as soon as the bare basics - memory allocation, cpumasks and
+ * idr are up.  It sets up all the data structures and system workqueues
+ * and allows early boot code to create workqueues and queue/cancel work
+ * items.  Actual work item execution starts only after kthreads can be
+ * created and scheduled right before early initcalls.
+ */
+int __init workqueue_init_early(void)
 {
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
 	int i, cpu;
@@ -5542,16 +5591,6 @@
 		}
 	}
 
-	/* create the initial worker */
-	for_each_online_cpu(cpu) {
-		struct worker_pool *pool;
-
-		for_each_cpu_worker_pool(pool, cpu) {
-			pool->flags &= ~POOL_DISASSOCIATED;
-			BUG_ON(!create_worker(pool));
-		}
-	}
-
 	/* create default unbound and ordered wq attrs */
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;
@@ -5588,8 +5627,36 @@
 	       !system_power_efficient_wq ||
 	       !system_freezable_power_efficient_wq);
 
+	return 0;
+}
+
+/**
+ * workqueue_init - bring workqueue subsystem fully online
+ *
+ * This is the latter half of the two-stage workqueue subsystem initialization
+ * and is invoked as soon as kthreads can be created and scheduled.
+ * Workqueues have been created and work items queued on them, but there
+ * are no kworkers executing the work items yet.  Populate the worker pools
+ * with the initial workers and enable future kworker creations.
+ */
+int __init workqueue_init(void)
+{
+	struct worker_pool *pool;
+	int cpu, bkt;
+
+	/* create the initial workers */
+	for_each_online_cpu(cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
+			pool->flags &= ~POOL_DISASSOCIATED;
+			BUG_ON(!create_worker(pool));
+		}
+	}
+
+	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
+		BUG_ON(!create_worker(pool));
+
+	wq_online = true;
 	wq_watchdog_init();
 
 	return 0;
 }
-early_initcall(init_workqueues);
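
With init_workqueues() split in two, boot is expected to call the halves at different points; a sketch of the intended ordering (assuming init/main.c call sites along the lines of the upstream split, which are not part of this hunk):

asmlinkage __visible void __init start_kernel(void)
{
	/* ... memory allocation, cpumasks and idr are up ... */
	workqueue_init_early();	/* queueing/canceling works; nothing runs yet */
	/* ... rest of early boot ... */
}

static noinline void __init kernel_init_freeable(void)
{
	/* kthreadd is running by now, so work items can actually execute */
	workqueue_init();	/* spawn the initial kworkers, set wq_online */
	/* ... early initcalls follow ... */
}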
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 29fa81f..c7f7db1 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -53,6 +53,9 @@
 
 	/* used only by rescuers to point to the target workqueue */
 	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
+
+	/* used by the scheduler to determine a worker's last known identity */
+	work_func_t		last_func;
 };
 
 /**
@@ -67,9 +70,10 @@
 
 /*
  * Scheduler hooks for concurrency managed workqueue.  Only to be used from
- * sched/core.c and workqueue.c.
+ * sched/ and workqueue.c.
  */
 void wq_worker_waking_up(struct task_struct *task, int cpu);
 struct task_struct *wq_worker_sleeping(struct task_struct *task);
+work_func_t wq_worker_last_func(struct task_struct *task);
 
 #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
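
wq_worker_last_func() lets the scheduler side identify which work item a kworker last ran. The motivating consumer is PSI's periodic aggregation, which must not wake its own kworker back up as that kworker goes to sleep. A sketch of such a call site (psi_avgs_work is an assumed name for the aggregation work function; the PSI wiring itself is outside this file):

static void example_on_task_sleep(struct task_struct *task, int clear)
{
	bool wake_clock = true;

	/*
	 * Periodic aggregation shuts itself off when no task changes occur.
	 * A state change normally re-arms it, unless the task going to
	 * sleep is the aggregation kworker itself, in which case waking it
	 * would ping-pong forever.
	 */
	if ((clear & TSK_RUNNING) && (task->flags & PF_WQ_WORKER) &&
	    wq_worker_last_func(task) == psi_avgs_work)
		wake_clock = false;

	if (wake_clock)
		; /* re-arm the periodic aggregation work here */
}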
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ad250d1..639f799 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1939,9 +1939,9 @@
 	tristate "Perform selftest on hash functions"
 	default n
 	help
-	  Enable this option to test the kernel's integer (<linux/hash,h>)
-	  and string (<linux/stringhash.h>) hash functions on boot
-	  (or module load).
+	  Enable this option to test the kernel's integer (<linux/hash.h>),
+	  string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
+	  hash functions on boot (or module load).
 
 	  This is intended to help people writing architecture-specific
 	  optimized versions.  If unsure, say N.
diff --git a/lib/Makefile b/lib/Makefile
index 78e7175..0ce29cf 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,7 +22,8 @@
 	 sha1.o chacha.o md5.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o win_minmax.o
+	 earlycpio.o seq_buf.o siphash.o \
+	 nmi_backtrace.o nodemask.o win_minmax.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -46,8 +47,9 @@
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
-obj-$(CONFIG_TEST_HASH) += test_hash.o
+obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
 obj-$(CONFIG_TEST_KASAN) += test_kasan.o
+CFLAGS_test_kasan.o += -fno-builtin
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_LKM) += test_module.o
 obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 5cd0935..3b46c54 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -781,9 +781,11 @@
 		new_s0->index_key[i] =
 			ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
 
-	blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
-	pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
-	new_s0->index_key[keylen - 1] &= ~blank;
+	if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
+		blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+		pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
+		new_s0->index_key[keylen - 1] &= ~blank;
+	}
 
 	/* This now reduces to a node splitting exercise for which we'll need
 	 * to regenerate the disparity table.
diff --git a/lib/bsearch.c b/lib/bsearch.c
index e33c179..d500484 100644
--- a/lib/bsearch.c
+++ b/lib/bsearch.c
@@ -11,6 +11,7 @@
 
 #include <linux/export.h>
 #include <linux/bsearch.h>
+#include <linux/kprobes.h>
 
 /*
  * bsearch - binary search an array of elements
@@ -51,3 +52,4 @@
 	return NULL;
 }
 EXPORT_SYMBOL(bsearch);
+NOKPROBE_SYMBOL(bsearch);
diff --git a/lib/bug.c b/lib/bug.c
index bc3656e..dc7aa6b 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -177,7 +177,7 @@
 	if (file)
 		pr_crit("kernel BUG at %s:%u!\n", file, line);
 	else
-		pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
 			(void *)bugaddr);
 
 	return BUG_TRAP_TYPE_BUG;
diff --git a/lib/div64.c b/lib/div64.c
index 7f34525..c1c1a4c 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -102,7 +102,7 @@
 		quot = div_u64_rem(dividend, divisor, &rem32);
 		*remainder = rem32;
 	} else {
-		int n = 1 + fls(high);
+		int n = fls(high);
 		quot = div_u64(dividend >> n, divisor >> n);
 
 		if (quot != 0)
@@ -140,7 +140,7 @@
 	if (high == 0) {
 		quot = div_u64(dividend, divisor);
 	} else {
-		int n = 1 + fls(high);
+		int n = fls(high);
 		quot = div_u64(dividend >> n, divisor >> n);
 
 		if (quot != 0)
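
The div64 change drops an over-shift: fls(high) is already enough to make divisor >> n fit in 32 bits for div_u64(), and the extra "1 +" only discarded a bit of precision from the first quotient estimate. A quick userspace check of the normalization (sketch):

#include <stdint.h>
#include <stdio.h>

static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	uint64_t divisor = 0x180000000ULL;	/* high word = 1, fls = 1 */
	int n = fls32((uint32_t)(divisor >> 32));

	printf("n=%d shifted=%#llx fits32=%d\n", n,
	       (unsigned long long)(divisor >> n),
	       (divisor >> n) <= 0xffffffffULL);
	return 0;
}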
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 1ef4cc3..6d35274 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -7,6 +7,7 @@
 
 #include <linux/kernel.h>
 #include <linux/export.h>
+#include <linux/bitops.h>
 
 /**
  * int_sqrt - rough approximation to sqrt
@@ -21,7 +22,7 @@
 	if (x <= 1)
 		return x;
 
-	m = 1UL << (BITS_PER_LONG - 2);
+	m = 1UL << (__fls(x) & ~1UL);
 	while (m != 0) {
 		b = y + m;
 		y >>= 1;
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 3057011..10ca694 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -24,7 +24,7 @@
 ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
 NEON_FLAGS := -ffreestanding
 ifeq ($(ARCH),arm)
-NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
+NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
 endif
 ifeq ($(ARCH),arm64)
 CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only
diff --git a/lib/siphash.c b/lib/siphash.c
new file mode 100644
index 0000000..3ae58b4
--- /dev/null
+++ b/lib/siphash.c
@@ -0,0 +1,551 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#include <linux/siphash.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+#include <linux/dcache.h>
+#include <asm/word-at-a-time.h>
+#endif
+
+#define SIPROUND \
+	do { \
+	v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
+	v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
+	v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
+	v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
+	} while (0)
+
+#define PREAMBLE(len) \
+	u64 v0 = 0x736f6d6570736575ULL; \
+	u64 v1 = 0x646f72616e646f6dULL; \
+	u64 v2 = 0x6c7967656e657261ULL; \
+	u64 v3 = 0x7465646279746573ULL; \
+	u64 b = ((u64)(len)) << 56; \
+	v3 ^= key->key[1]; \
+	v2 ^= key->key[0]; \
+	v1 ^= key->key[1]; \
+	v0 ^= key->key[0];
+
+#define POSTAMBLE \
+	v3 ^= b; \
+	SIPROUND; \
+	SIPROUND; \
+	v0 ^= b; \
+	v2 ^= 0xff; \
+	SIPROUND; \
+	SIPROUND; \
+	SIPROUND; \
+	SIPROUND; \
+	return (v0 ^ v1) ^ (v2 ^ v3);
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u64));
+	const u8 left = len & (sizeof(u64) - 1);
+	u64 m;
+	PREAMBLE(len)
+	for (; data != end; data += sizeof(u64)) {
+		m = le64_to_cpup(data);
+		v3 ^= m;
+		SIPROUND;
+		SIPROUND;
+		v0 ^= m;
+	}
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+	if (left)
+		b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+						  bytemask_from_count(left)));
+#else
+	switch (left) {
+	case 7: b |= ((u64)end[6]) << 48;
+	case 6: b |= ((u64)end[5]) << 40;
+	case 5: b |= ((u64)end[4]) << 32;
+	case 4: b |= le32_to_cpup(data); break;
+	case 3: b |= ((u64)end[2]) << 16;
+	case 2: b |= le16_to_cpup(data); break;
+	case 1: b |= end[0];
+	}
+#endif
+	POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u64));
+	const u8 left = len & (sizeof(u64) - 1);
+	u64 m;
+	PREAMBLE(len)
+	for (; data != end; data += sizeof(u64)) {
+		m = get_unaligned_le64(data);
+		v3 ^= m;
+		SIPROUND;
+		SIPROUND;
+		v0 ^= m;
+	}
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+	if (left)
+		b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+						  bytemask_from_count(left)));
+#else
+	switch (left) {
+	case 7: b |= ((u64)end[6]) << 48;
+	case 6: b |= ((u64)end[5]) << 40;
+	case 5: b |= ((u64)end[4]) << 32;
+	case 4: b |= get_unaligned_le32(end); break;
+	case 3: b |= ((u64)end[2]) << 16;
+	case 2: b |= get_unaligned_le16(end); break;
+	case 1: b |= end[0];
+	}
+#endif
+	POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_unaligned);
+#endif
+
+/**
+ * siphash_1u64 - compute 64-bit siphash PRF value of a u64
+ * @first: first u64
+ * @key: the siphash key
+ */
+u64 siphash_1u64(const u64 first, const siphash_key_t *key)
+{
+	PREAMBLE(8)
+	v3 ^= first;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= first;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u64);
+
+/**
+ * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
+ * @first: first u64
+ * @second: second u64
+ * @key: the siphash key
+ */
+u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+	PREAMBLE(16)
+	v3 ^= first;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= second;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_2u64);
+
+/**
+ * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @key: the siphash key
+ */
+u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
+		 const siphash_key_t *key)
+{
+	PREAMBLE(24)
+	v3 ^= first;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= second;
+	v3 ^= third;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= third;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u64);
+
+/**
+ * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @forth: fourth u64
+ * @key: the siphash key
+ */
+u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
+		 const u64 forth, const siphash_key_t *key)
+{
+	PREAMBLE(32)
+	v3 ^= first;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= second;
+	v3 ^= third;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= third;
+	v3 ^= forth;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= forth;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_4u64);
+
+u64 siphash_1u32(const u32 first, const siphash_key_t *key)
+{
+	PREAMBLE(4)
+	b |= first;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u32);
+
+u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
+		 const siphash_key_t *key)
+{
+	u64 combined = (u64)second << 32 | first;
+	PREAMBLE(12)
+	v3 ^= combined;
+	SIPROUND;
+	SIPROUND;
+	v0 ^= combined;
+	b |= third;
+	POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u32);
+
+#if BITS_PER_LONG == 64
+/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
+ * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
+ */
+
+#define HSIPROUND SIPROUND
+#define HPREAMBLE(len) PREAMBLE(len)
+#define HPOSTAMBLE \
+	v3 ^= b; \
+	HSIPROUND; \
+	v0 ^= b; \
+	v2 ^= 0xff; \
+	HSIPROUND; \
+	HSIPROUND; \
+	HSIPROUND; \
+	return (v0 ^ v1) ^ (v2 ^ v3);
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u64));
+	const u8 left = len & (sizeof(u64) - 1);
+	u64 m;
+	HPREAMBLE(len)
+	for (; data != end; data += sizeof(u64)) {
+		m = le64_to_cpup(data);
+		v3 ^= m;
+		HSIPROUND;
+		v0 ^= m;
+	}
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+	if (left)
+		b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+						  bytemask_from_count(left)));
+#else
+	switch (left) {
+	case 7: b |= ((u64)end[6]) << 48;
+	case 6: b |= ((u64)end[5]) << 40;
+	case 5: b |= ((u64)end[4]) << 32;
+	case 4: b |= le32_to_cpup(data); break;
+	case 3: b |= ((u64)end[2]) << 16;
+	case 2: b |= le16_to_cpup(data); break;
+	case 1: b |= end[0];
+	}
+#endif
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+			 const hsiphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u64));
+	const u8 left = len & (sizeof(u64) - 1);
+	u64 m;
+	HPREAMBLE(len)
+	for (; data != end; data += sizeof(u64)) {
+		m = get_unaligned_le64(data);
+		v3 ^= m;
+		HSIPROUND;
+		v0 ^= m;
+	}
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+	if (left)
+		b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+						  bytemask_from_count(left)));
+#else
+	switch (left) {
+	case 7: b |= ((u64)end[6]) << 48;
+	case 6: b |= ((u64)end[5]) << 40;
+	case 5: b |= ((u64)end[4]) << 32;
+	case 4: b |= get_unaligned_le32(end); break;
+	case 3: b |= ((u64)end[2]) << 16;
+	case 2: b |= get_unaligned_le16(end); break;
+	case 1: b |= end[0];
+	}
+#endif
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+	HPREAMBLE(4)
+	b |= first;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+	u64 combined = (u64)second << 32 | first;
+	HPREAMBLE(8)
+	v3 ^= combined;
+	HSIPROUND;
+	v0 ^= combined;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+		  const hsiphash_key_t *key)
+{
+	u64 combined = (u64)second << 32 | first;
+	HPREAMBLE(12)
+	v3 ^= combined;
+	HSIPROUND;
+	v0 ^= combined;
+	b |= third;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: fourth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+		  const u32 forth, const hsiphash_key_t *key)
+{
+	u64 combined = (u64)second << 32 | first;
+	HPREAMBLE(16)
+	v3 ^= combined;
+	HSIPROUND;
+	v0 ^= combined;
+	combined = (u64)forth << 32 | third;
+	v3 ^= combined;
+	HSIPROUND;
+	v0 ^= combined;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#else
+#define HSIPROUND \
+	do { \
+	v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
+	v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
+	v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
+	v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
+	} while (0)
+
+#define HPREAMBLE(len) \
+	u32 v0 = 0; \
+	u32 v1 = 0; \
+	u32 v2 = 0x6c796765U; \
+	u32 v3 = 0x74656462U; \
+	u32 b = ((u32)(len)) << 24; \
+	v3 ^= key->key[1]; \
+	v2 ^= key->key[0]; \
+	v1 ^= key->key[1]; \
+	v0 ^= key->key[0];
+
+#define HPOSTAMBLE \
+	v3 ^= b; \
+	HSIPROUND; \
+	v0 ^= b; \
+	v2 ^= 0xff; \
+	HSIPROUND; \
+	HSIPROUND; \
+	HSIPROUND; \
+	return v1 ^ v3;
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u32));
+	const u8 left = len & (sizeof(u32) - 1);
+	u32 m;
+	HPREAMBLE(len)
+	for (; data != end; data += sizeof(u32)) {
+		m = le32_to_cpup(data);
+		v3 ^= m;
+		HSIPROUND;
+		v0 ^= m;
+	}
+	switch (left) {
+	case 3: b |= ((u32)end[2]) << 16;
+	case 2: b |= le16_to_cpup(data); break;
+	case 1: b |= end[0];
+	}
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+			 const hsiphash_key_t *key)
+{
+	const u8 *end = data + len - (len % sizeof(u32));
+	const u8 left = len & (sizeof(u32) - 1);
+	u32 m;
+	HPREAMBLE(len)
+	for (; data != end; data += sizeof(u32)) {
+		m = get_unaligned_le32(data);
+		v3 ^= m;
+		HSIPROUND;
+		v0 ^= m;
+	}
+	switch (left) {
+	case 3: b |= ((u32)end[2]) << 16;
+	case 2: b |= get_unaligned_le16(end); break;
+	case 1: b |= end[0];
+	}
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+	HPREAMBLE(4)
+	v3 ^= first;
+	HSIPROUND;
+	v0 ^= first;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+	HPREAMBLE(8)
+	v3 ^= first;
+	HSIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	HSIPROUND;
+	v0 ^= second;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+		  const hsiphash_key_t *key)
+{
+	HPREAMBLE(12)
+	v3 ^= first;
+	HSIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	HSIPROUND;
+	v0 ^= second;
+	v3 ^= third;
+	HSIPROUND;
+	v0 ^= third;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: fourth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+		  const u32 forth, const hsiphash_key_t *key)
+{
+	HPREAMBLE(16)
+	v3 ^= first;
+	HSIPROUND;
+	v0 ^= first;
+	v3 ^= second;
+	HSIPROUND;
+	v0 ^= second;
+	v3 ^= third;
+	HSIPROUND;
+	v0 ^= third;
+	v3 ^= forth;
+	HSIPROUND;
+	v0 ^= forth;
+	HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#endif
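
Typical use of the new primitives: keep a once-initialized random key around and feed fixed-size inputs through the word-sized helpers instead of the generic byte loop. A minimal sketch of hashing a flow tuple (example_flow_hash is hypothetical, not part of this patch):

#include <linux/siphash.h>
#include <linux/net.h>	/* net_get_random_once() */

static siphash_key_t flow_key __read_mostly;

static u32 example_flow_hash(u32 saddr, u32 daddr, u16 sport, u16 dport)
{
	net_get_random_once(&flow_key, sizeof(flow_key));

	/* three u32 lanes, folded to 32 bits for a hashtable bucket */
	return (u32)siphash_3u32(saddr, daddr,
				 ((u32)sport << 16) | dport, &flow_key);
}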
diff --git a/lib/string.c b/lib/string.c
index ccabe16..ddd3907 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -772,6 +772,26 @@
 EXPORT_SYMBOL(memcmp);
 #endif
 
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+	return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
+
 #ifndef __HAVE_ARCH_MEMSCAN
 /**
  * memscan - Find a character in an area of memory.
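
bcmp()'s contract is deliberately weaker than memcmp()'s, which is what allows compilers to lower equality-only memcmp() calls to it. The only portable usage pattern is a zero test (sketch):

#include <linux/string.h>
#include <linux/types.h>

static inline bool buffers_equal(const void *a, const void *b, size_t len)
{
	/* only the zero vs. non-zero distinction is defined for bcmp() */
	return bcmp(a, b, len) == 0;
}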
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 8fcad61..7cde4c1 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -356,7 +356,7 @@
 static noinline void __init ksize_unpoisons_memory(void)
 {
 	char *ptr;
-	size_t size = 123, real_size = size;
+	size_t size = 123, real_size;
 
 	pr_info("ksize() unpoisons the whole allocated chunk\n");
 	ptr = kmalloc(size, GFP_KERNEL);
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 563f10e..71ebfa4 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -24,24 +24,6 @@
 #define PAD_SIZE 16
 #define FILL_CHAR '$'
 
-#define PTR1 ((void*)0x01234567)
-#define PTR2 ((void*)(long)(int)0xfedcba98)
-
-#if BITS_PER_LONG == 64
-#define PTR1_ZEROES "000000000"
-#define PTR1_SPACES "         "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fffffffffedcba98"
-#define PTR_WIDTH 16
-#else
-#define PTR1_ZEROES "0"
-#define PTR1_SPACES " "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fedcba98"
-#define PTR_WIDTH 8
-#endif
-#define PTR_WIDTH_STR stringify(PTR_WIDTH)
-
 static unsigned total_tests __initdata;
 static unsigned failed_tests __initdata;
 static char *test_buffer __initdata;
@@ -217,30 +199,79 @@
 	test("a  |   |   ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c");
 }
 
+#define PLAIN_BUF_SIZE 64	/* leave some space so we don't oops */
+
+#if BITS_PER_LONG == 64
+
+#define PTR_WIDTH 16
+#define PTR ((void *)0xffff0123456789ab)
+#define PTR_STR "ffff0123456789ab"
+#define ZEROS "00000000"	/* hex 32 zero bits */
+
+static int __init
+plain_format(void)
+{
+	char buf[PLAIN_BUF_SIZE];
+	int nchars;
+
+	nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+	if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+		return -1;
+
+	return 0;
+}
+
+#else
+
+#define PTR_WIDTH 8
+#define PTR ((void *)0x456789ab)
+#define PTR_STR "456789ab"
+
+static int __init
+plain_format(void)
+{
+	/* Format is implicitly tested for 32 bit machines by plain_hash() */
+	return 0;
+}
+
+#endif	/* BITS_PER_LONG == 64 */
+
+static int __init
+plain_hash(void)
+{
+	char buf[PLAIN_BUF_SIZE];
+	int nchars;
+
+	nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+	if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * We can't use test() to test %p because we don't know what output to expect
+ * after an address is hashed.
+ */
 static void __init
 plain(void)
 {
-	test(PTR1_ZEROES PTR1_STR " " PTR2_STR, "%p %p", PTR1, PTR2);
-	/*
-	 * The field width is overloaded for some %p extensions to
-	 * pass another piece of information. For plain pointers, the
-	 * behaviour is slightly odd: One cannot pass either the 0
-	 * flag nor a precision to %p without gcc complaining, and if
-	 * one explicitly gives a field width, the number is no longer
-	 * zero-padded.
-	 */
-	test("|" PTR1_STR PTR1_SPACES "  |  " PTR1_SPACES PTR1_STR "|",
-	     "|%-*p|%*p|", PTR_WIDTH+2, PTR1, PTR_WIDTH+2, PTR1);
-	test("|" PTR2_STR "  |  " PTR2_STR "|",
-	     "|%-*p|%*p|", PTR_WIDTH+2, PTR2, PTR_WIDTH+2, PTR2);
+	int err;
 
-	/*
-	 * Unrecognized %p extensions are treated as plain %p, but the
-	 * alphanumeric suffix is ignored (that is, does not occur in
-	 * the output.)
-	 */
-	test("|"PTR1_ZEROES PTR1_STR"|", "|%p0y|", PTR1);
-	test("|"PTR2_STR"|", "|%p0y|", PTR2);
+	err = plain_hash();
+	if (err) {
+		pr_warn("plain 'p' does not appear to be hashed\n");
+		failed_tests++;
+		return;
+	}
+
+	err = plain_format();
+	if (err) {
+		pr_warn("hashing plain 'p' has unexpected format\n");
+		failed_tests++;
+	}
 }
 
 static void __init
@@ -251,6 +282,7 @@
 static void __init
 kernel_ptr(void)
 {
+	/* We can't test this without access to kptr_restrict. */
 }
 
 static void __init
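
With plain %p hashed, callers that genuinely need the raw value must opt in with %px, as the trace_uprobe hunk above does. A sketch of the distinction (example_report is hypothetical):

static void example_report(const void *obj)
{
	pr_info("obj: %p\n", obj);	/* hashed output, safe for regular logs */
	pr_info("obj: %px\n", obj);	/* raw address, leaks kernel layout */
}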
diff --git a/lib/test_siphash.c b/lib/test_siphash.c
new file mode 100644
index 0000000..a6d854d
--- /dev/null
+++ b/lib/test_siphash.c
@@ -0,0 +1,223 @@
+/* Test cases for siphash.c
+ *
+ * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/siphash.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+
+/* Test vectors taken from reference source available at:
+ *     https://github.com/veorq/SipHash
+ */
+
+static const siphash_key_t test_key_siphash =
+	{{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u64 test_vectors_siphash[64] = {
+	0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL,
+	0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL,
+	0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL,
+	0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL,
+	0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL,
+	0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL,
+	0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL,
+	0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL,
+	0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL,
+	0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL,
+	0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL,
+	0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL,
+	0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL,
+	0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL,
+	0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL,
+	0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL,
+	0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL,
+	0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL,
+	0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL,
+	0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL,
+	0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL,
+	0x958a324ceb064572ULL
+};
+
+#if BITS_PER_LONG == 64
+static const hsiphash_key_t test_key_hsiphash =
+	{{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u32 test_vectors_hsiphash[64] = {
+	0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU,
+	0xe7ddf7fbU, 0x88d38328U, 0x49533b67U,
+	0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU,
+	0x6c063de4U, 0x92ff097fU, 0xf94dc352U,
+	0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U,
+	0x2a519956U, 0x7d908b66U, 0x63dbd80cU,
+	0xb473e63eU, 0x8d297d1cU, 0xa6cce040U,
+	0x2b45f844U, 0xa320872eU, 0xdae6c123U,
+	0x67349c8cU, 0x705b0979U, 0xca9913a5U,
+	0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U,
+	0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU,
+	0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U,
+	0xada26206U, 0xa3c33057U, 0xae3a36a1U,
+	0x7b108392U, 0x99e41531U, 0x3f1ad944U,
+	0xc8138825U, 0xc28949a6U, 0xfaf8876bU,
+	0x9f042196U, 0x68b1d623U, 0x8b5114fdU,
+	0xdf074c46U, 0x12cc86b3U, 0x0a52098fU,
+	0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U,
+	0x73f0bce6U, 0x70a7e980U, 0x243c6d75U,
+	0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U,
+	0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U,
+	0xb7bbb3a8U
+};
+#else
+static const hsiphash_key_t test_key_hsiphash =
+	{{ 0x03020100U, 0x07060504U }};
+
+static const u32 test_vectors_hsiphash[64] = {
+	0x5814c896U, 0xe7e864caU, 0xbc4b0e30U,
+	0x01539939U, 0x7e059ea6U, 0x88e3d89bU,
+	0xa0080b65U, 0x9d38d9d6U, 0x577999b1U,
+	0xc839caedU, 0xe4fa32cfU, 0x959246eeU,
+	0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU,
+	0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU,
+	0x06712339U, 0x522aca67U, 0x911bb605U,
+	0x90a65f0eU, 0xf826ef7bU, 0x62512debU,
+	0x57150ad7U, 0x5d473507U, 0x1ec47442U,
+	0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U,
+	0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU,
+	0xe0f6c934U, 0xb0652033U, 0x9b9851ccU,
+	0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU,
+	0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU,
+	0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U,
+	0x65671619U, 0x9f5fff91U, 0xd89c5267U,
+	0x007783ebU, 0x95766243U, 0xab639262U,
+	0x9c7e1390U, 0xc368dda6U, 0x38ddc455U,
+	0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU,
+	0x2ee80657U, 0x33dbb66aU, 0xae3f0577U,
+	0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U,
+	0x87178304U
+};
+#endif
+
+static int __init siphash_test_init(void)
+{
+	u8 in[64] __aligned(SIPHASH_ALIGNMENT);
+	u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
+	u8 i;
+	int ret = 0;
+
+	for (i = 0; i < 64; ++i) {
+		in[i] = i;
+		in_unaligned[i + 1] = i;
+		if (siphash(in, i, &test_key_siphash) !=
+						test_vectors_siphash[i]) {
+			pr_info("siphash self-test aligned %u: FAIL\n", i + 1);
+			ret = -EINVAL;
+		}
+		if (siphash(in_unaligned + 1, i, &test_key_siphash) !=
+						test_vectors_siphash[i]) {
+			pr_info("siphash self-test unaligned %u: FAIL\n", i + 1);
+			ret = -EINVAL;
+		}
+		if (hsiphash(in, i, &test_key_hsiphash) !=
+						test_vectors_hsiphash[i]) {
+			pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1);
+			ret = -EINVAL;
+		}
+		if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) !=
+						test_vectors_hsiphash[i]) {
+			pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1);
+			ret = -EINVAL;
+		}
+	}
+	if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) !=
+						test_vectors_siphash[8]) {
+		pr_info("siphash self-test 1u64: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+			 &test_key_siphash) != test_vectors_siphash[16]) {
+		pr_info("siphash self-test 2u64: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+			 0x1716151413121110ULL, &test_key_siphash) !=
+						test_vectors_siphash[24]) {
+		pr_info("siphash self-test 3u64: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+			 0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
+			 &test_key_siphash) != test_vectors_siphash[32]) {
+		pr_info("siphash self-test 4u64: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (siphash_1u32(0x03020100U, &test_key_siphash) !=
+						test_vectors_siphash[4]) {
+		pr_info("siphash self-test 1u32: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) !=
+						test_vectors_siphash[8]) {
+		pr_info("siphash self-test 2u32: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (siphash_3u32(0x03020100U, 0x07060504U,
+			 0x0b0a0908U, &test_key_siphash) !=
+						test_vectors_siphash[12]) {
+		pr_info("siphash self-test 3u32: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (siphash_4u32(0x03020100U, 0x07060504U,
+			 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) !=
+						test_vectors_siphash[16]) {
+		pr_info("siphash self-test 4u32: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) !=
+						test_vectors_hsiphash[4]) {
+		pr_info("hsiphash self-test 1u32: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) !=
+						test_vectors_hsiphash[8]) {
+		pr_info("hsiphash self-test 2u32: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (hsiphash_3u32(0x03020100U, 0x07060504U,
+			  0x0b0a0908U, &test_key_hsiphash) !=
+						test_vectors_hsiphash[12]) {
+		pr_info("hsiphash self-test 3u32: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (hsiphash_4u32(0x03020100U, 0x07060504U,
+			  0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) !=
+						test_vectors_hsiphash[16]) {
+		pr_info("hsiphash self-test 4u32: FAIL\n");
+		ret = -EINVAL;
+	}
+	if (!ret)
+		pr_info("self-tests: pass\n");
+	return ret;
+}
+
+static void __exit siphash_test_exit(void)
+{
+}
+
+module_init(siphash_test_init);
+module_exit(siphash_test_exit);
+
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 60e108c..c652b4a 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -86,11 +86,13 @@
 	return bits <= inline_bits;
 }
 
-static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
+static s_max get_signed_val(struct type_descriptor *type, void *val)
 {
 	if (is_inline_int(type)) {
 		unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
-		return ((s_max)val) << extra_bits >> extra_bits;
+		unsigned long ulong_val = (unsigned long)val;
+
+		return ((s_max)ulong_val) << extra_bits >> extra_bits;
 	}
 
 	if (type_bit_width(type) == 64)
@@ -99,15 +101,15 @@
 	return *(s_max *)val;
 }
 
-static bool val_is_negative(struct type_descriptor *type, unsigned long val)
+static bool val_is_negative(struct type_descriptor *type, void *val)
 {
 	return type_is_signed(type) && get_signed_val(type, val) < 0;
 }
 
-static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
+static u_max get_unsigned_val(struct type_descriptor *type, void *val)
 {
 	if (is_inline_int(type))
-		return val;
+		return (unsigned long)val;
 
 	if (type_bit_width(type) == 64)
 		return *(u64 *)val;
@@ -116,7 +118,7 @@
 }
 
 static void val_to_string(char *str, size_t size, struct type_descriptor *type,
-	unsigned long value)
+			void *value)
 {
 	if (type_is_int(type)) {
 		if (type_bit_width(type) == 128) {
@@ -168,8 +170,8 @@
 	current->in_ubsan--;
 }
 
-static void handle_overflow(struct overflow_data *data, unsigned long lhs,
-			unsigned long rhs, char op)
+static void handle_overflow(struct overflow_data *data, void *lhs,
+			void *rhs, char op)
 {
 
 	struct type_descriptor *type = data->type;
@@ -196,8 +198,7 @@
 }
 
 void __ubsan_handle_add_overflow(struct overflow_data *data,
-				unsigned long lhs,
-				unsigned long rhs)
+				void *lhs, void *rhs)
 {
 
 	handle_overflow(data, lhs, rhs, '+');
@@ -205,23 +206,21 @@
 EXPORT_SYMBOL(__ubsan_handle_add_overflow);
 
 void __ubsan_handle_sub_overflow(struct overflow_data *data,
-				unsigned long lhs,
-				unsigned long rhs)
+				void *lhs, void *rhs)
 {
 	handle_overflow(data, lhs, rhs, '-');
 }
 EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
 
 void __ubsan_handle_mul_overflow(struct overflow_data *data,
-				unsigned long lhs,
-				unsigned long rhs)
+				void *lhs, void *rhs)
 {
 	handle_overflow(data, lhs, rhs, '*');
 }
 EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
 
 void __ubsan_handle_negate_overflow(struct overflow_data *data,
-				unsigned long old_val)
+				void *old_val)
 {
 	unsigned long flags;
 	char old_val_str[VALUE_LENGTH];
@@ -242,8 +241,7 @@
 
 
 void __ubsan_handle_divrem_overflow(struct overflow_data *data,
-				unsigned long lhs,
-				unsigned long rhs)
+				void *lhs, void *rhs)
 {
 	unsigned long flags;
 	char rhs_val_str[VALUE_LENGTH];
@@ -328,7 +326,7 @@
 }
 
 void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
-				unsigned long ptr)
+				void *ptr)
 {
 	struct type_mismatch_data_common common_data = {
 		.location = &data->location,
@@ -337,12 +335,12 @@
 		.type_check_kind = data->type_check_kind
 	};
 
-	ubsan_type_mismatch_common(&common_data, ptr);
+	ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
 }
 EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
 
 void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
-				unsigned long ptr)
+				void *ptr)
 {
 
 	struct type_mismatch_data_common common_data = {
@@ -352,7 +350,7 @@
 		.type_check_kind = data->type_check_kind
 	};
 
-	ubsan_type_mismatch_common(&common_data, ptr);
+	ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
 }
 EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
 
@@ -376,7 +374,7 @@
 EXPORT_SYMBOL(__ubsan_handle_nonnull_return);
 
 void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
-					unsigned long bound)
+					void *bound)
 {
 	unsigned long flags;
 	char bound_str[VALUE_LENGTH];
@@ -393,8 +391,7 @@
 }
 EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);
 
-void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
-				unsigned long index)
+void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
 {
 	unsigned long flags;
 	char index_str[VALUE_LENGTH];
@@ -412,7 +409,7 @@
 EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
 
 void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
-					unsigned long lhs, unsigned long rhs)
+					void *lhs, void *rhs)
 {
 	unsigned long flags;
 	struct type_descriptor *rhs_type = data->rhs_type;
@@ -463,7 +460,7 @@
 EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
 
 void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
-				unsigned long val)
+				void *val)
 {
 	unsigned long flags;
 	char val_str[VALUE_LENGTH];
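
The handlers above now take the compiler's value handle as a void pointer: small
integers are encoded inline in the handle bits, while wider values are passed by
reference, as the two branches of get_signed_val() show. A minimal userspace
sketch of that decoding, with illustrative stand-ins for the kernel's
type_descriptor helpers, modeling a 32-bit target so the by-reference path is
exercised (gcc/clang __int128 assumed):

    #include <stdint.h>
    #include <stdio.h>

    typedef __int128 s_max;

    /* Stand-in: assume a 32-bit target, so only values up to 32 bits
     * travel inline in the handle. */
    static int is_inline_int(unsigned bits) { return bits <= 32; }

    /* Decode a value handle the way get_signed_val() does: inline values
     * live in the pointer bits and are sign-extended with a shift pair
     * (relying on arithmetic right shift); wider values are read through
     * the pointer. */
    static s_max get_signed_val(unsigned bits, void *val)
    {
            if (is_inline_int(bits)) {
                    unsigned extra = sizeof(s_max) * 8 - bits;
                    unsigned long ulong_val = (unsigned long)val;

                    return ((s_max)ulong_val) << extra >> extra;
            }
            if (bits == 64)
                    return *(int64_t *)val;
            return *(s_max *)val;
    }

    int main(void)
    {
            /* An 8-bit -1 arrives inline as the handle 0xff... */
            printf("%lld\n", (long long)get_signed_val(8, (void *)0xffUL));

            /* ...while a 64-bit value is passed by reference. */
            int64_t big = -123456789LL;
            printf("%lld\n", (long long)get_signed_val(64, &big));
            return 0;
    }
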
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 79ba3cc..43f9b90 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -32,6 +32,8 @@
 #include <linux/cred.h>
 #include <linux/uuid.h>
 #include <net/addrconf.h>
+#include <linux/siphash.h>
+#include <linux/compiler.h>
 #ifdef CONFIG_BLOCK
 #include <linux/blkdev.h>
 #endif
@@ -1342,6 +1344,59 @@
 	return string(buf, end, uuid, spec);
 }
 
+int kptr_restrict __read_mostly;
+
+static noinline_for_stack
+char *restricted_pointer(char *buf, char *end, const void *ptr,
+			 struct printf_spec spec)
+{
+	spec.base = 16;
+	spec.flags |= SMALL;
+	if (spec.field_width == -1) {
+		spec.field_width = 2 * sizeof(ptr);
+		spec.flags |= ZEROPAD;
+	}
+
+	switch (kptr_restrict) {
+	case 0:
+		/* Always print %pK values */
+		break;
+	case 1: {
+		const struct cred *cred;
+
+		/*
+		 * kptr_restrict==1 cannot be used in IRQ context
+		 * because its test for CAP_SYSLOG would be meaningless.
+		 */
+		if (in_irq() || in_serving_softirq() || in_nmi())
+			return string(buf, end, "pK-error", spec);
+
+		/*
+		 * Only print the real pointer value if the current
+		 * process has CAP_SYSLOG and is running with the
+		 * same credentials it started with. This is because
+		 * access to files is checked at open() time, but %pK
+		 * checks permission at read() time. We don't want to
+		 * leak pointer values if a binary opens a file using
+		 * %pK and then elevates privileges before reading it.
+		 */
+		cred = current_cred();
+		if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+		    !uid_eq(cred->euid, cred->uid) ||
+		    !gid_eq(cred->egid, cred->gid))
+			ptr = NULL;
+		break;
+	}
+	case 2:
+	default:
+		/* Always print 0's for %pK */
+		ptr = NULL;
+		break;
+	}
+
+	return number(buf, end, (unsigned long)ptr, spec);
+}
+
 static noinline_for_stack
 char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
 {
@@ -1467,7 +1522,86 @@
 	return format_flags(buf, end, flags, names);
 }
 
-int kptr_restrict __read_mostly;
+static noinline_for_stack
+char *pointer_string(char *buf, char *end, const void *ptr,
+		     struct printf_spec spec)
+{
+	spec.base = 16;
+	spec.flags |= SMALL;
+	if (spec.field_width == -1) {
+		spec.field_width = 2 * sizeof(ptr);
+		spec.flags |= ZEROPAD;
+	}
+
+	return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+static bool have_filled_random_ptr_key __read_mostly;
+static siphash_key_t ptr_key __read_mostly;
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+	get_random_bytes(&ptr_key, sizeof(ptr_key));
+	/*
+	 * have_filled_random_ptr_key==true is dependent on get_random_bytes().
+	 * ptr_to_id() needs to see have_filled_random_ptr_key==true
+	 * after get_random_bytes() returns.
+	 */
+	smp_mb();
+	WRITE_ONCE(have_filled_random_ptr_key, true);
+}
+
+static struct random_ready_callback random_ready = {
+	.func = fill_random_ptr_key
+};
+
+static int __init initialize_ptr_random(void)
+{
+	int ret = add_random_ready_callback(&random_ready);
+
+	if (!ret) {
+		return 0;
+	} else if (ret == -EALREADY) {
+		fill_random_ptr_key(&random_ready);
+		return 0;
+	}
+
+	return ret;
+}
+early_initcall(initialize_ptr_random);
+
+/* Maps a pointer to a 32 bit unique identifier. */
+static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
+{
+	unsigned long hashval;
+	const int default_width = 2 * sizeof(ptr);
+
+	if (unlikely(!have_filled_random_ptr_key)) {
+		spec.field_width = default_width;
+		/* string length must be less than default_width */
+		return string(buf, end, "(ptrval)", spec);
+	}
+
+#ifdef CONFIG_64BIT
+	hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+	/*
+	 * Mask the hash down to its low 32 bits; this makes it explicit that
+	 * we have modified the address (and 32 bits is plenty for a unique ID).
+	 */
+	hashval = hashval & 0xffffffff;
+#else
+	hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+
+	spec.flags |= SMALL;
+	if (spec.field_width == -1) {
+		spec.field_width = default_width;
+		spec.flags |= ZEROPAD;
+	}
+	spec.base = 16;
+
+	return number(buf, end, hashval, spec);
+}
 
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
@@ -1561,11 +1695,16 @@
  *       g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
  *       v vma flags (VM_*) given as pointer to unsigned long
  *
+ * - 'x' For printing the address. Equivalent to "%lx".
+ *
  * ** Please update also Documentation/printk-formats.txt when making changes **
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
  * pointer to the real address.
+ *
+ * Note: The default behaviour (unadorned %p) is to hash the address,
+ * rendering it useful as a unique identifier.
  */
 static noinline_for_stack
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
@@ -1655,47 +1794,7 @@
 			return buf;
 		}
 	case 'K':
-		switch (kptr_restrict) {
-		case 0:
-			/* Always print %pK values */
-			break;
-		case 1: {
-			const struct cred *cred;
-
-			/*
-			 * kptr_restrict==1 cannot be used in IRQ context
-			 * because its test for CAP_SYSLOG would be meaningless.
-			 */
-			if (in_irq() || in_serving_softirq() || in_nmi()) {
-				if (spec.field_width == -1)
-					spec.field_width = default_width;
-				return string(buf, end, "pK-error", spec);
-			}
-
-			/*
-			 * Only print the real pointer value if the current
-			 * process has CAP_SYSLOG and is running with the
-			 * same credentials it started with. This is because
-			 * access to files is checked at open() time, but %pK
-			 * checks permission at read() time. We don't want to
-			 * leak pointer values if a binary opens a file using
-			 * %pK and then elevates privileges before reading it.
-			 */
-			cred = current_cred();
-			if (!has_capability_noaudit(current, CAP_SYSLOG) ||
-			    !uid_eq(cred->euid, cred->uid) ||
-			    !gid_eq(cred->egid, cred->gid))
-				ptr = NULL;
-			break;
-		}
-		case 2:
-		default:
-			/* Always print 0's for %pK */
-			ptr = NULL;
-			break;
-		}
-		break;
-
+		return restricted_pointer(buf, end, ptr, spec);
 	case 'N':
 		return netdev_bits(buf, end, ptr, fmt);
 	case 'a':
@@ -1715,15 +1814,12 @@
 
 	case 'G':
 		return flags_string(buf, end, ptr, fmt);
+	case 'x':
+		return pointer_string(buf, end, ptr, spec);
 	}
-	spec.flags |= SMALL;
-	if (spec.field_width == -1) {
-		spec.field_width = default_width;
-		spec.flags |= ZEROPAD;
-	}
-	spec.base = 16;
 
-	return number(buf, end, (unsigned long) ptr, spec);
+	/* default is to _not_ leak addresses, hash before printing */
+	return ptr_to_id(buf, end, ptr, spec);
 }
 
 /*
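
With this change, an unadorned %p prints a per-boot hash of the pointer, %px
opts back into the raw value, and %pK keeps the kptr_restrict policy, now
factored out into restricted_pointer(). The hashing idea behind ptr_to_id()
can be sketched in userspace; the mixer below is only a stand-in for
siphash_1u64(), which in the kernel takes a 128-bit key filled from
get_random_bytes():

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for siphash_1u64(): any keyed mixer illustrates it. */
    static uint64_t toy_hash(uint64_t v, uint64_t key)
    {
            v ^= key;
            v *= 0x9e3779b97f4a7c15ULL;     /* splitmix64-style multiply */
            v ^= v >> 32;
            return v;
    }

    /* Per-boot secret: the kernel fills ptr_key via get_random_bytes()
     * and prints "(ptrval)" until the RNG is ready. */
    static const uint64_t ptr_key = 0xdeadbeefcafef00dULL;

    static uint32_t ptr_to_id(const void *ptr)
    {
            /* Keep only 32 bits: makes it obvious the value is not the
             * raw address, and 32 bits is plenty for a unique ID. */
            return (uint32_t)toy_hash((uint64_t)(uintptr_t)ptr, ptr_key);
    }

    int main(void)
    {
            int x;

            printf("raw  (as %%px would): %p\n", (void *)&x);
            printf("hash (as %%p prints): %08x\n", (unsigned)ptr_to_id(&x));
            return 0;
    }
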
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
old mode 100644
new mode 100755
index 62ca907..bdb7d16
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -690,6 +690,8 @@
 
 	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
 	bdi->cgwb_congested_tree = RB_ROOT;
+	atomic_set(&bdi->usage_cnt, 1);
+	init_rwsem(&bdi->wb_switch_rwsem);
 
 	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
 	if (!ret) {
diff --git a/mm/cma.c b/mm/cma.c
index 1ccfaa1..677ed05 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -383,12 +383,14 @@
 
 	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
 	if (ret)
-		goto err;
+		goto free_mem;
 
 	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
 		&base);
 	return 0;
 
+free_mem:
+	memblock_free(base, size);
 err:
 	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 	return ret;
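
The new free_mem label makes the unwind order match the setup order: the
memblock reservation is released only on the path where it was made but could
not be registered, while the err label still just reports. A reduced sketch of
the idiom, with placeholder helpers standing in for the memblock/CMA calls:

    #include <stdio.h>

    /* Placeholder helpers for the sketch. */
    static int reserve(void)  { puts("reserve");  return 0; }
    static int init(int fail) { puts("init");     return fail ? -1 : 0; }
    static void unreserve(void) { puts("unreserve"); }

    /* Each label undoes exactly the steps that succeeded before the
     * failure, in reverse order -- the shape the cma fix restores. */
    static int setup(int fail_init)
    {
            int ret = reserve();
            if (ret)
                    goto err;

            ret = init(fail_init);
            if (ret)
                    goto free_mem;

            return 0;

    free_mem:
            unreserve();
    err:
            fprintf(stderr, "setup failed: %d\n", ret);
            return ret;
    }

    int main(void)
    {
            setup(1);
            return 0;
    }
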
diff --git a/mm/compaction.c b/mm/compaction.c
index f002a7f..6a4c4eb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -20,6 +20,7 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/page_owner.h>
+#include <linux/psi.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -2005,11 +2006,15 @@
 	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
 
 	while (!kthread_should_stop()) {
+		unsigned long pflags;
+
 		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
 		wait_event_freezable(pgdat->kcompactd_wait,
 				kcompactd_work_requested(pgdat));
 
+		psi_memstall_enter(&pflags);
 		kcompactd_do_work(pgdat);
+		psi_memstall_leave(&pflags);
 	}
 
 	return 0;
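
kcompactd now brackets its productive work with psi_memstall_enter()/leave(),
the same pattern this series applies to direct reclaim and direct compaction:
the pair marks a span where the task runs only because of memory pressure, so
PSI can account the time. A sketch of the calling convention with stub hooks
(the real ones feed /proc/pressure/memory):

    #include <stdio.h>

    /* Stub psi hooks: the kernel versions save the task's in-memstall
     * state into *flags on enter and restore it on leave, so the pair
     * nests safely. */
    static void psi_memstall_enter(unsigned long *flags)
    {
            *flags = 0;                     /* saved-state cookie */
            puts("memstall begin");
    }

    static void psi_memstall_leave(unsigned long *flags)
    {
            (void)*flags;
            puts("memstall end");
    }

    static void kcompactd_do_work(void)
    {
            puts("compacting");             /* the productive work */
    }

    int main(void)
    {
            unsigned long pflags;

            /* Same shape as the hunk above: enter before the work,
             * leave right after, cookie kept on the stack. */
            psi_memstall_enter(&pflags);
            kcompactd_do_work();
            psi_memstall_leave(&pflags);
            return 0;
    }
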
diff --git a/mm/debug.c b/mm/debug.c
index bebe48a..dc7ad1f 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -49,7 +49,7 @@
 	 */
 	int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
 
-	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
+	pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
 		  page, page_ref_count(page), mapcount,
 		  page->mapping, page_to_pgoff(page));
 	if (PageCompound(page))
@@ -64,7 +64,7 @@
 
 #ifdef CONFIG_MEMCG
 	if (page->mem_cgroup)
-		pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
+		pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
 #endif
 }
 
@@ -79,10 +79,10 @@
 
 void dump_vma(const struct vm_area_struct *vma)
 {
-	pr_emerg("vma %p start %p end %p\n"
-		"next %p prev %p mm %p\n"
-		"prot %lx anon_vma %p vm_ops %p\n"
-		"pgoff %lx file %p private_data %p\n"
+	pr_emerg("vma %px start %px end %px\n"
+		"next %px prev %px mm %px\n"
+		"prot %lx anon_vma %px vm_ops %px\n"
+		"pgoff %lx file %px private_data %px\n"
 		"flags: %#lx(%pGv)\n",
 		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
 		vma->vm_prev, vma->vm_mm,
@@ -95,27 +95,27 @@
 
 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
+	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
-		"get_unmapped_area %p\n"
+		"get_unmapped_area %px\n"
 #endif
 		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
-		"pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
+		"pgd %px mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
 		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
 		"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
 		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
 		"start_brk %lx brk %lx start_stack %lx\n"
 		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
-		"binfmt %p flags %lx core_state %p\n"
+		"binfmt %px flags %lx core_state %px\n"
 #ifdef CONFIG_AIO
-		"ioctx_table %p\n"
+		"ioctx_table %px\n"
 #endif
 #ifdef CONFIG_MEMCG
-		"owner %p "
+		"owner %px "
 #endif
-		"exe_file %p\n"
+		"exe_file %px\n"
 #ifdef CONFIG_MMU_NOTIFIER
-		"mmu_notifier_mm %p\n"
+		"mmu_notifier_mm %px\n"
 #endif
 #ifdef CONFIG_NUMA_BALANCING
 		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
diff --git a/mm/filemap.c b/mm/filemap.c
index 4fcd8ee..1a5780b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -35,6 +35,8 @@
 #include <linux/memcontrol.h>
 #include <linux/cleancache.h>
 #include <linux/rmap.h>
+#include <linux/delayacct.h>
+#include <linux/psi.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -747,12 +749,9 @@
 		 * data from the working set, only to cache data that will
 		 * get overwritten with something else, is a waste of memory.
 		 */
-		if (!(gfp_mask & __GFP_WRITE) &&
-		    shadow && workingset_refault(shadow)) {
-			SetPageActive(page);
-			workingset_activation(page);
-		} else
-			ClearPageActive(page);
+		WARN_ON_ONCE(PageActive(page));
+		if (!(gfp_mask & __GFP_WRITE) && shadow)
+			workingset_refault(page, shadow);
 		lru_cache_add(page);
 	}
 	return ret;
@@ -790,46 +789,177 @@
  * at a cost of "thundering herd" phenomena during rare hash
  * collisions.
  */
-wait_queue_head_t *page_waitqueue(struct page *page)
+#define PAGE_WAIT_TABLE_BITS 8
+#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
+static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
+
+static wait_queue_head_t *page_waitqueue(struct page *page)
 {
-	return bit_waitqueue(page, 0);
+	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
 }
-EXPORT_SYMBOL(page_waitqueue);
+
+void __init pagecache_init(void)
+{
+	int i;
+
+	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
+		init_waitqueue_head(&page_wait_table[i]);
+
+	page_writeback_init();
+}
+
+struct wait_page_key {
+	struct page *page;
+	int bit_nr;
+	int page_match;
+};
+
+struct wait_page_queue {
+	struct page *page;
+	int bit_nr;
+	wait_queue_t wait;
+};
+
+static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
+{
+	struct wait_page_key *key = arg;
+	struct wait_page_queue *wait_page
+		= container_of(wait, struct wait_page_queue, wait);
+
+	if (wait_page->page != key->page)
+	       return 0;
+	key->page_match = 1;
+
+	if (wait_page->bit_nr != key->bit_nr)
+		return 0;
+	if (test_bit(key->bit_nr, &key->page->flags))
+		return 0;
+
+	return autoremove_wake_function(wait, mode, sync, key);
+}
+
+void wake_up_page_bit(struct page *page, int bit_nr)
+{
+	wait_queue_head_t *q = page_waitqueue(page);
+	struct wait_page_key key;
+	unsigned long flags;
+
+	key.page = page;
+	key.bit_nr = bit_nr;
+	key.page_match = 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_locked_key(q, TASK_NORMAL, &key);
+	/*
+	 * It is possible for other pages to have collided on the waitqueue
+	 * hash, so in that case check for a page match. That prevents a long-
+	 * term waiter on an unrelated page from keeping this page's Waiters
+	 * bit set indefinitely.
+	 *
+	 * It is still possible to miss a case here, when we woke page waiters
+	 * and removed them from the waitqueue, but there are still other
+	 * page waiters.
+	 */
+	if (!waitqueue_active(q) || !key.page_match) {
+		ClearPageWaiters(page);
+		/*
+		 * It's possible to miss clearing Waiters here, when we woke
+		 * our page waiters, but the hashed waitqueue has waiters for
+		 * other pages on it.
+		 *
+		 * That's okay, it's a rare case. The next waker will clear it.
+		 */
+	}
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(wake_up_page_bit);
+
+static inline int wait_on_page_bit_common(wait_queue_head_t *q,
+		struct page *page, int bit_nr, int state, bool lock)
+{
+	struct wait_page_queue wait_page;
+	wait_queue_t *wait = &wait_page.wait;
+	bool thrashing = false;
+	unsigned long pflags;
+	int ret = 0;
+
+	if (bit_nr == PG_locked &&
+	    !PageUptodate(page) && PageWorkingset(page)) {
+		if (!PageSwapBacked(page))
+			delayacct_thrashing_start();
+		psi_memstall_enter(&pflags);
+		thrashing = true;
+	}
+
+	init_wait(wait);
+	wait->func = wake_page_function;
+	wait_page.page = page;
+	wait_page.bit_nr = bit_nr;
+
+	for (;;) {
+		spin_lock_irq(&q->lock);
+
+		if (likely(list_empty(&wait->task_list))) {
+			if (lock)
+				__add_wait_queue_tail_exclusive(q, wait);
+			else
+				__add_wait_queue(q, wait);
+			SetPageWaiters(page);
+		}
+
+		set_current_state(state);
+
+		spin_unlock_irq(&q->lock);
+
+		if (likely(test_bit(bit_nr, &page->flags))) {
+			io_schedule();
+		}
+
+		if (lock) {
+			if (!test_and_set_bit_lock(bit_nr, &page->flags))
+				break;
+		} else {
+			if (!test_bit(bit_nr, &page->flags))
+				break;
+		}
+
+		if (unlikely(signal_pending_state(state, current))) {
+			ret = -EINTR;
+			break;
+		}
+	}
+
+	finish_wait(q, wait);
+
+	if (thrashing) {
+		if (!PageSwapBacked(page))
+			delayacct_thrashing_end();
+		psi_memstall_leave(&pflags);
+	}
+
+	/*
+	 * A signal could leave PageWaiters set. Clearing it here if
+	 * !waitqueue_active would be possible (by open-coding finish_wait),
+	 * but would still miss the wait hash collision case. We can already
+	 * fail to clear collision cases, so don't bother with signals either.
+	 */
+
+	return ret;
+}
 
 void wait_on_page_bit(struct page *page, int bit_nr)
 {
-	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
-
-	if (test_bit(bit_nr, &page->flags))
-		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
-							TASK_UNINTERRUPTIBLE);
+	wait_queue_head_t *q = page_waitqueue(page);
+	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
 int wait_on_page_bit_killable(struct page *page, int bit_nr)
 {
-	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
-
-	if (!test_bit(bit_nr, &page->flags))
-		return 0;
-
-	return __wait_on_bit(page_waitqueue(page), &wait,
-			     bit_wait_io, TASK_KILLABLE);
+	wait_queue_head_t *q = page_waitqueue(page);
+	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
 }
 
-int wait_on_page_bit_killable_timeout(struct page *page,
-				       int bit_nr, unsigned long timeout)
-{
-	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
-
-	wait.key.timeout = jiffies + timeout;
-	if (!test_bit(bit_nr, &page->flags))
-		return 0;
-	return __wait_on_bit(page_waitqueue(page), &wait,
-			     bit_wait_io_timeout, TASK_KILLABLE);
-}
-EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);
-
 /**
  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
  * @page: Page defining the wait queue of interest
@@ -844,6 +974,7 @@
 
 	spin_lock_irqsave(&q->lock, flags);
 	__add_wait_queue(q, waiter);
+	SetPageWaiters(page);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL_GPL(add_page_wait_queue);
@@ -928,23 +1059,19 @@
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
  */
-void __lock_page(struct page *page)
+void __lock_page(struct page *__page)
 {
-	struct page *page_head = compound_head(page);
-	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
-
-	__wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
-							TASK_UNINTERRUPTIBLE);
+	struct page *page = compound_head(__page);
+	wait_queue_head_t *q = page_waitqueue(page);
+	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
 }
 EXPORT_SYMBOL(__lock_page);
 
-int __lock_page_killable(struct page *page)
+int __lock_page_killable(struct page *__page)
 {
-	struct page *page_head = compound_head(page);
-	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
-
-	return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
-					bit_wait_io, TASK_KILLABLE);
+	struct page *page = compound_head(__page);
+	wait_queue_head_t *q = page_waitqueue(page);
+	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
@@ -1190,6 +1317,9 @@
  *		@gfp_mask and added to the page cache and the VM's LRU
  *		list. The page is returned locked and with an increased
  *		refcount. Otherwise, %NULL is returned.
+ * FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
+ *   its own locking dance if the page is already in cache, or unlock the page
+ *   before returning if we had to add the page to pagecache.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
@@ -1242,7 +1372,7 @@
 		if (!page)
 			return NULL;
 
-		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
+		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
 			fgp_flags |= FGP_LOCK;
 
 		/* Init accessed so avoid atomic mark_page_accessed later */
@@ -1256,6 +1386,14 @@
 			if (err == -EEXIST)
 				goto repeat;
 		}
+
+		/*
+		 * add_to_page_cache_lru locks the page, and for mmap we expect
+		 * an unlocked page.
+		 */
+		if (page && (fgp_flags & FGP_FOR_MMAP))
+			unlock_page(page);
+
 	}
 
 	return page;
@@ -1992,62 +2130,96 @@
 EXPORT_SYMBOL(generic_file_read_iter);
 
 #ifdef CONFIG_MMU
-/**
- * page_cache_read - adds requested page to the page cache if not already there
- * @file:	file to read
- * @offset:	page index
- * @gfp_mask:	memory allocation flags
- *
- * This adds the requested page to the page cache if it isn't already there,
- * and schedules an I/O to read in its contents from disk.
- */
-static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
-{
-	struct address_space *mapping = file->f_mapping;
-	struct page *page;
-	int ret;
-
-	do {
-		page = __page_cache_alloc(gfp_mask|__GFP_COLD);
-		if (!page)
-			return -ENOMEM;
-
-		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
-		if (ret == 0)
-			ret = mapping->a_ops->readpage(file, page);
-		else if (ret == -EEXIST)
-			ret = 0; /* losing race to add is OK */
-
-		put_page(page);
-
-	} while (ret == AOP_TRUNCATED_PAGE);
-
-	return ret;
-}
-
 #define MMAP_LOTSAMISS  (100)
 
+static struct file *maybe_unlock_mmap_for_io(struct vm_area_struct *vma,
+		unsigned long flags, struct file *fpin)
+{
+	if (fpin)
+		return fpin;
+
+	/*
+	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
+	 * anything, so we only pin the file and drop the mmap_sem if only
+	 * FAULT_FLAG_ALLOW_RETRY is set.
+	 */
+	if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
+			FAULT_FLAG_ALLOW_RETRY) {
+		fpin = get_file(vma->vm_file);
+		up_read(&vma->vm_mm->mmap_sem);
+	}
+	return fpin;
+}
+
 /*
- * Synchronous readahead happens when we don't even find
- * a page in the page cache at all.
+ * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
+ * @vma - the vma for this fault.
+ * @flags - the fault flags.
+ * @page - the page to lock.
+ * @fpin - the pointer to the file we may pin (or is already pinned).
+ *
+ * This works like lock_page_or_retry in that it can drop the mmap_sem.  It
+ * differs in that it returns 1 with the page locked, or 0 if it could not
+ * lock the page.  If we had to drop the mmap_sem then fpin will point to the
+ * pinned file and needs to be fput()'ed at a later point.
  */
-static void do_sync_mmap_readahead(struct vm_area_struct *vma,
+static int lock_page_maybe_drop_mmap(struct vm_area_struct *vma,
+		unsigned long flags, struct page *page, struct file **fpin)
+{
+	if (trylock_page(page))
+		return 1;
+
+	/*
+	 * NOTE! This will make us return with VM_FAULT_RETRY, but with
+	 * the mmap_sem still held. That's how FAULT_FLAG_RETRY_NOWAIT
+	 * is supposed to work. We have way too many special cases..
+	 */
+	if (flags & FAULT_FLAG_RETRY_NOWAIT)
+		return 0;
+	*fpin = maybe_unlock_mmap_for_io(vma, flags, *fpin);
+	if (flags & FAULT_FLAG_KILLABLE) {
+		if (__lock_page_killable(page)) {
+			/*
+			 * We didn't have the right flags to drop the mmap_sem,
+			 * but all fault_handlers only check for fatal signals
+			 * if we return VM_FAULT_RETRY, so we need to drop the
+			 * mmap_sem here and return 0 if we don't have a fpin.
+			 */
+			if (*fpin == NULL)
+				up_read(&vma->vm_mm->mmap_sem);
+			return 0;
+		}
+	} else
+		__lock_page(page);
+	return 1;
+}
+
+/*
+ * Synchronous readahead happens when we don't even find a page in the page
+ * cache at all.  We don't want to perform IO under the mmap sem, so if we
+ * have to drop the mmap sem we return the file that was pinned in order to
+ * do so.  If we didn't pin a file then we return NULL.  The returned file
+ * needs to be fput()'ed when we're done with it.
+ */
+static struct file *do_sync_mmap_readahead(struct vm_area_struct *vma,
+				   unsigned long flags,
 				   struct file_ra_state *ra,
 				   struct file *file,
 				   pgoff_t offset)
 {
+	struct file *fpin = NULL;
 	struct address_space *mapping = file->f_mapping;
 
 	/* If we don't want any read-ahead, don't bother */
 	if (vma->vm_flags & VM_RAND_READ)
-		return;
+		return fpin;
 	if (!ra->ra_pages)
-		return;
+		return fpin;
 
 	if (vma->vm_flags & VM_SEQ_READ) {
+		fpin = maybe_unlock_mmap_for_io(vma, flags, fpin);
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
-		return;
+		return fpin;
 	}
 
 	/* Avoid banging the cache line if not needed */
@@ -2059,37 +2231,45 @@
 	 * stop bothering with read-ahead. It will only hurt.
 	 */
 	if (ra->mmap_miss > MMAP_LOTSAMISS)
-		return;
+		return fpin;
 
 	/*
 	 * mmap read-around
 	 */
+	fpin = maybe_unlock_mmap_for_io(vma, flags, fpin);
 	ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
 	ra->size = ra->ra_pages;
 	ra->async_size = ra->ra_pages / 4;
 	ra_submit(ra, mapping, file);
+	return fpin;
 }
 
 /*
  * Asynchronous readahead happens when we find the page and PG_readahead,
- * so we want to possibly extend the readahead further..
+ * so we want to possibly extend the readahead further.  We return the file that
+ * was pinned if we have to drop the mmap_sem in order to do IO.
  */
-static void do_async_mmap_readahead(struct vm_area_struct *vma,
+static struct file *do_async_mmap_readahead(struct vm_area_struct *vma,
+				    unsigned long flags,
 				    struct file_ra_state *ra,
 				    struct file *file,
 				    struct page *page,
 				    pgoff_t offset)
 {
 	struct address_space *mapping = file->f_mapping;
+	struct file *fpin = NULL;
 
 	/* If we don't want any read-ahead, don't bother */
 	if (vma->vm_flags & VM_RAND_READ)
-		return;
+		return fpin;
 	if (ra->mmap_miss > 0)
 		ra->mmap_miss--;
-	if (PageReadahead(page))
+	if (PageReadahead(page)) {
+		fpin = maybe_unlock_mmap_for_io(vma, flags, fpin);
 		page_cache_async_readahead(mapping, ra, file,
 					   page, offset, ra->ra_pages);
+	}
+	return fpin;
 }
 
 /**
@@ -2120,6 +2300,7 @@
 {
 	int error;
 	struct file *file = vma->vm_file;
+	struct file *fpin = NULL;
 	struct address_space *mapping = file->f_mapping;
 	struct file_ra_state *ra = &file->f_ra;
 	struct inode *inode = mapping->host;
@@ -2141,23 +2322,27 @@
 		 * We found the page, so try async readahead before
 		 * waiting for the lock.
 		 */
-		do_async_mmap_readahead(vma, ra, file, page, offset);
+		fpin = do_async_mmap_readahead(vma, vmf->flags, ra,
+						file, page, offset);
 	} else if (!page) {
 		/* No page in the page cache at all */
-		do_sync_mmap_readahead(vma, ra, file, offset);
 		count_vm_event(PGMAJFAULT);
 		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;
+		fpin = do_sync_mmap_readahead(vma, vmf->flags, ra,
+						file, offset);
 retry_find:
-		page = find_get_page(mapping, offset);
-		if (!page)
-			goto no_cached_page;
+		page = pagecache_get_page(mapping, offset,
+					  FGP_CREAT|FGP_FOR_MMAP,
+					  vmf->gfp_mask);
+		if (!page) {
+			if (fpin)
+				goto out_retry;
+			return VM_FAULT_OOM;
+		}
 	}
-
-	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
-		put_page(page);
-		return ret | VM_FAULT_RETRY;
-	}
+	if (!lock_page_maybe_drop_mmap(vma, vmf->flags, page, &fpin))
+		goto out_retry;
 
 	/* Did it get truncated? */
 	if (unlikely(page->mapping != mapping)) {
@@ -2175,6 +2360,16 @@
 		goto page_not_uptodate;
 
 	/*
+	 * We've made it this far and we had to drop our mmap_sem; now is the
+	 * time to return to the upper layer and have it re-find the vma and
+	 * redo the fault.
+	 */
+	if (fpin) {
+		unlock_page(page);
+		goto out_retry;
+	}
+
+	/*
 	 * Found the page and have a reference on it.
 	 * We must recheck i_size under page lock.
 	 */
@@ -2188,30 +2383,6 @@
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 
-no_cached_page:
-	/*
-	 * We're only likely to ever get here if MADV_RANDOM is in
-	 * effect.
-	 */
-	error = page_cache_read(file, offset, vmf->gfp_mask);
-
-	/*
-	 * The page we want has now been added to the page cache.
-	 * In the unlikely event that someone removed it in the
-	 * meantime, we'll just come back here and read it again.
-	 */
-	if (error >= 0)
-		goto retry_find;
-
-	/*
-	 * An error return from page_cache_read can result if the
-	 * system is low on memory, or a problem occurs while trying
-	 * to schedule I/O.
-	 */
-	if (error == -ENOMEM)
-		return VM_FAULT_OOM;
-	return VM_FAULT_SIGBUS;
-
 page_not_uptodate:
 	/*
 	 * Umm, take care of errors if the page isn't up-to-date.
@@ -2220,12 +2391,15 @@
 	 * and we need to check for errors.
 	 */
 	ClearPageError(page);
+	fpin = maybe_unlock_mmap_for_io(vma, vmf->flags, fpin);
 	error = mapping->a_ops->readpage(file, page);
 	if (!error) {
 		wait_on_page_locked(page);
 		if (!PageUptodate(page))
 			error = -EIO;
 	}
+	if (fpin)
+		goto out_retry;
 	put_page(page);
 
 	if (!error || error == AOP_TRUNCATED_PAGE)
@@ -2234,6 +2408,18 @@
 	/* Things didn't work out. Return zero to tell the mm layer so. */
 	shrink_readahead_size_eio(file, ra);
 	return VM_FAULT_SIGBUS;
+
+out_retry:
+	/*
+	 * We dropped the mmap_sem, we need to return to the fault handler to
+	 * re-find the vma and come back and find our hopefully still populated
+	 * page.
+	 */
+	if (page)
+		put_page(page);
+	if (fpin)
+		fput(fpin);
+	return ret | VM_FAULT_RETRY;
 }
 EXPORT_SYMBOL(filemap_fault);
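
The rewritten wait code replaces per-bit waitqueues with a fixed table of 256
queues indexed by a hash of the page pointer; colliding pages share a queue,
and wake_page_function() filters wakeups by page and bit. Just the bucketing
step, sketched with a simplified hash_ptr() (the multiplier is the kernel's
64-bit golden-ratio constant; the top bits are kept because they mix best):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_WAIT_TABLE_BITS 8
    #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)

    /* Simplified stand-in for the kernel's hash_ptr(). */
    static unsigned hash_ptr(const void *ptr, unsigned bits)
    {
            uint64_t v = (uint64_t)(uintptr_t)ptr * 0x61c8864680b583ebULL;

            return (unsigned)(v >> (64 - bits));
    }

    static unsigned page_waitqueue_index(const void *page)
    {
            return hash_ptr(page, PAGE_WAIT_TABLE_BITS);
    }

    int main(void)
    {
            /* Two fake "struct page" addresses, 64 bytes apart like
             * page structs in memmap: they usually land in different
             * buckets, but collisions are legal and are handled by the
             * wake function's page/bit match. */
            char pages[2][64];

            printf("bucket 0: %u\n", page_waitqueue_index(&pages[0]));
            printf("bucket 1: %u\n", page_waitqueue_index(&pages[1]));
            return 0;
    }
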
 
diff --git a/mm/gup.c b/mm/gup.c
index d71da72..99c2f10 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1423,7 +1423,8 @@
 		if (pmd_none(pmd))
 			return 0;
 
-		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
+		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
+			     pmd_devmap(pmd))) {
 			/*
 			 * NUMA hinting faults need to be handled in the GUP
 			 * slowpath for accounting purposes and so that they
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2541f3a..c24ff26 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1890,6 +1890,7 @@
 			 (1L << PG_mlocked) |
 			 (1L << PG_uptodate) |
 			 (1L << PG_active) |
+			 (1L << PG_workingset) |
 			 (1L << PG_locked) |
 			 (1L << PG_unevictable) |
 			 (1L << PG_dirty)));
diff --git a/mm/internal.h b/mm/internal.h
index d9ac6de..1e93b2a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -36,6 +36,8 @@
 /* Do not use these with a slab allocator */
 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
+void page_writeback_init(void);
+
 int do_swap_page(struct fault_env *fe, pte_t orig_pte);
 
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 627c699..3c57210 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -90,7 +90,14 @@
 /* Unpoison the stack for the current task beyond a watermark sp value. */
 asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 {
-	__kasan_unpoison_stack(current, watermark);
+	/*
+	 * Calculate the task stack base address.  Avoid using 'current'
+	 * because this function is called by early resume code which hasn't
+	 * yet set up the percpu register (%gs).
+	 */
+	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
+
+	kasan_unpoison_shadow(base, watermark - base);
 }
 
 /*
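
The fix derives the stack base from the watermark itself rather than from
`current`, which is not yet valid in early resume. Kernel stacks are
THREAD_SIZE aligned, so clearing the low bits of any in-stack address yields
the base; the span from base to watermark is what gets unpoisoned. The
arithmetic, sketched with an assumed 16 KiB stack:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE     (16ULL * 1024)  /* assumed size/alignment */

    int main(void)
    {
            /* Pretend this is a watermark inside a kernel stack. */
            uint64_t watermark = 0xffff800012345a70ULL;

            /* Stacks are THREAD_SIZE aligned, so rounding down to that
             * alignment recovers the base without touching current. */
            uint64_t base = watermark & ~(THREAD_SIZE - 1);

            printf("watermark %#llx -> base %#llx, unpoison %llu bytes\n",
                   (unsigned long long)watermark, (unsigned long long)base,
                   (unsigned long long)(watermark - base));
            return 0;
    }
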
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index 3f9a41c..31238da 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -15,6 +15,7 @@
 #include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/memblock.h>
+#include <linux/mm.h>
 #include <linux/pfn.h>
 
 #include <asm/page.h>
@@ -49,7 +50,7 @@
 	pte_t *pte = pte_offset_kernel(pmd, addr);
 	pte_t zero_pte;
 
-	zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
+	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
 	zero_pte = pte_wrprotect(zero_pte);
 
 	while (addr + PAGE_SIZE <= end) {
@@ -69,7 +70,7 @@
 		next = pmd_addr_end(addr, end);
 
 		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
-			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
 			continue;
 		}
 
@@ -92,9 +93,9 @@
 		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
 			pmd_t *pmd;
 
-			pud_populate(&init_mm, pud, kasan_zero_pmd);
+			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
 			pmd = pmd_offset(pud, addr);
-			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
 			continue;
 		}
 
@@ -135,11 +136,11 @@
 			 * puds,pmds, so pgd_populate(), pud_populate()
 			 * is noops.
 			 */
-			pgd_populate(&init_mm, pgd, kasan_zero_pud);
+			pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
 			pud = pud_offset(pgd, addr);
-			pud_populate(&init_mm, pud, kasan_zero_pmd);
+			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
 			pmd = pmd_offset(pud, addr);
-			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
 			continue;
 		}
 
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 5f2b9eb..8698e1c 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -138,7 +138,7 @@
 
 	pr_err("BUG: KASAN: %s in %pS\n",
 		bug_type, (void *)info->ip);
-	pr_err("%s of size %zu at addr %p by task %s/%d\n",
+	pr_err("%s of size %zu at addr %px by task %s/%d\n",
 		info->is_write ? "Write" : "Read", info->access_size,
 		info->access_addr, current->comm, task_pid_nr(current));
 }
@@ -210,7 +210,7 @@
 	const char *rel_type;
 	int rel_bytes;
 
-	pr_err("The buggy address belongs to the object at %p\n"
+	pr_err("The buggy address belongs to the object at %px\n"
 	       " which belongs to the cache %s of size %d\n",
 		object, cache->name, cache->object_size);
 
@@ -229,7 +229,7 @@
 	}
 
 	pr_err("The buggy address is located %d bytes %s of\n"
-	       " %d-byte region [%p, %p)\n",
+	       " %d-byte region [%px, %px)\n",
 		rel_bytes, rel_type, cache->object_size, (void *)object_addr,
 		(void *)(object_addr + cache->object_size));
 }
@@ -306,7 +306,7 @@
 		char shadow_buf[SHADOW_BYTES_PER_ROW];
 
 		snprintf(buffer, sizeof(buffer),
-			(i == 0) ? ">%p: " : " %p: ", kaddr);
+			(i == 0) ? ">%px: " : " %px: ", kaddr);
 		/*
 		 * We should not pass a shadow pointer to generic
 		 * function, because generic functions may try to
@@ -405,6 +405,7 @@
 	disable_trace_on_warning();
 
 	info.access_addr = (void *)addr;
+	info.first_bad_addr = (void *)addr;
 	info.access_size = size;
 	info.is_write = is_write;
 	info.ip = ip;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 27e6d17..a17d9c0 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1705,19 +1705,17 @@
 	struct page *hpage = compound_head(page);
 
 	if (!PageHuge(page) && PageTransHuge(hpage)) {
-		lock_page(hpage);
-		if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
-			unlock_page(hpage);
-			if (!PageAnon(hpage))
+		lock_page(page);
+		if (!PageAnon(page) || unlikely(split_huge_page(page))) {
+			unlock_page(page);
+			if (!PageAnon(page))
 				pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
 			else
 				pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
-			put_hwpoison_page(hpage);
+			put_hwpoison_page(page);
 			return -EBUSY;
 		}
-		unlock_page(hpage);
-		get_hwpoison_page(page);
-		put_hwpoison_page(hpage);
+		unlock_page(page);
 	}
 
 	if (PageHuge(page))
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ac1dae6..beb7c33 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -550,11 +550,16 @@
 			goto retry;
 		}
 
-		migrate_page_add(page, qp->pagelist, flags);
+		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+			if (!vma_migratable(vma))
+				break;
+			migrate_page_add(page, qp->pagelist, flags);
+		} else
+			break;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
-	return 0;
+	return addr != end ? -EIO : 0;
 }
 
 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
@@ -628,7 +633,12 @@
 	unsigned long endvma = vma->vm_end;
 	unsigned long flags = qp->flags;
 
-	if (!vma_migratable(vma))
+	/*
+	 * We need to check MPOL_MF_STRICT so that -EIO can be returned,
+	 * regardless of vma_migratable.
+	 */
+	if (!vma_migratable(vma) &&
+	    !(flags & MPOL_MF_STRICT))
 		return 1;
 
 	if (endvma > end)
@@ -655,7 +665,7 @@
 	}
 
 	/* queue pages from current vma */
-	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+	if (flags & MPOL_MF_VALID)
 		return 0;
 	return 1;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index d793289..2317b42 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -637,6 +637,8 @@
 		SetPageActive(newpage);
 	} else if (TestClearPageUnevictable(page))
 		SetPageUnevictable(newpage);
+	if (PageWorkingset(page))
+		SetPageWorkingset(newpage);
 	if (PageChecked(page))
 		SetPageChecked(newpage);
 	if (PageMappedToDisk(page))
diff --git a/mm/mincore.c b/mm/mincore.c
index bfb8664..3b6a883 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -167,6 +167,22 @@
 	return 0;
 }
 
+static inline bool can_do_mincore(struct vm_area_struct *vma)
+{
+	if (vma_is_anonymous(vma))
+		return true;
+	if (!vma->vm_file)
+		return false;
+	/*
+	 * Reveal pagecache information only for non-anonymous mappings that
+	 * correspond to the files the calling process could (if it tried) open
+	 * for writing; otherwise we'd be including shared non-exclusive
+	 * mappings, which opens a side channel.
+	 */
+	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
+		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
+}
+
 /*
  * Do a chunk of "sys_mincore()". We've already checked
  * all the arguments, we hold the mmap semaphore: we should
@@ -187,8 +203,13 @@
 	vma = find_vma(current->mm, addr);
 	if (!vma || addr < vma->vm_start)
 		return -ENOMEM;
-	mincore_walk.mm = vma->vm_mm;
 	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
+	if (!can_do_mincore(vma)) {
+		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
+		memset(vec, 1, pages);
+		return pages;
+	}
+	mincore_walk.mm = vma->vm_mm;
 	err = walk_page_range(addr, end, &mincore_walk);
 	if (err < 0)
 		return err;
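
When can_do_mincore() refuses, the page-table walk is skipped and every page
in the range is reported as resident, which reveals nothing while keeping the
syscall's contract. The fallback's page-count arithmetic, sketched:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE       4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long addr = 0x1000, end = 0x4800;  /* 3.5 pages */
            unsigned char vec[8];

            /* Fallback: claim every page in the range is resident,
             * rounding the partial tail page up. */
            unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);

            memset(vec, 1, pages);
            printf("reported %lu pages as resident\n", pages);
            return 0;
    }
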
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
old mode 100644
new mode 100755
index c6ab8e8..f77c4ce
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -65,6 +65,7 @@
 #include <linux/kthread.h>
 #include <linux/memcontrol.h>
 #include <linux/show_mem_notifier.h>
+#include <linux/psi.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -3247,15 +3248,20 @@
 		enum compact_priority prio, enum compact_result *compact_result)
 {
 	struct page *page;
+	unsigned long pflags;
 	unsigned int noreclaim_flag = current->flags & PF_MEMALLOC;
 
 	if (!order)
 		return NULL;
 
+	psi_memstall_enter(&pflags);
 	current->flags |= PF_MEMALLOC;
+
 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
 									prio);
+
 	current->flags = (current->flags & ~PF_MEMALLOC) | noreclaim_flag;
+	psi_memstall_leave(&pflags);
 
 	if (*compact_result <= COMPACT_INACTIVE)
 		return NULL;
@@ -3435,11 +3441,13 @@
 {
 	struct reclaim_state reclaim_state;
 	int progress;
+	unsigned long pflags;
 
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
+	psi_memstall_enter(&pflags);
 	current->flags |= PF_MEMALLOC;
 	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
@@ -3451,6 +3459,7 @@
 	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
 	current->flags &= ~PF_MEMALLOC;
+	psi_memstall_leave(&pflags);
 
 	cond_resched();
 
@@ -4092,11 +4101,11 @@
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
-		page_ref_add(page, size - 1);
+		page_ref_add(page, size);
 
 		/* reset page count bias and offset to start of new frag */
 		nc->pfmemalloc = page_is_pfmemalloc(page);
-		nc->pagecnt_bias = size;
+		nc->pagecnt_bias = size + 1;
 		nc->offset = size;
 	}
 
@@ -4112,10 +4121,10 @@
 		size = nc->size;
 #endif
 		/* OK, page count is 0, we can safely set it */
-		set_page_count(page, size);
+		set_page_count(page, size + 1);
 
 		/* reset page count bias and offset to start of new frag */
-		nc->pagecnt_bias = size;
+		nc->pagecnt_bias = size + 1;
 		offset = size - fragsz;
 	}
 
diff --git a/mm/page_ext.c b/mm/page_ext.c
index fc3e7ff..561ed6e 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -283,6 +283,7 @@
 		table_size = get_entry_size() * PAGES_PER_SECTION;
 
 		BUG_ON(PageReserved(page));
+		kmemleak_free(addr);
 		free_pages_exact(addr, table_size);
 	}
 }
diff --git a/mm/percpu.c b/mm/percpu.c
index 3794cfc..0462a2a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2048,8 +2048,8 @@
 		ai->groups[group].base_offset = areas[group] - base;
 	}
 
-	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
-		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
+	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
+		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
 		ai->dyn_size, ai->unit_size);
 
 	rc = pcpu_setup_first_chunk(ai, base);
@@ -2162,8 +2162,8 @@
 	}
 
 	/* we're ready, commit */
-	pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
-		unit_pages, psize_str, vm.addr, ai->static_size,
+	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
+		unit_pages, psize_str, ai->static_size,
 		ai->reserved_size, ai->dyn_size);
 
 	rc = pcpu_setup_first_chunk(ai, vm.addr);
diff --git a/mm/shmem.c b/mm/shmem.c
index d15851d..793a8cf 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2896,16 +2896,20 @@
 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 {
 	struct inode *inode = d_inode(old_dentry);
-	int ret;
+	int ret = 0;
 
 	/*
 	 * No ordinary (disk based) filesystem counts links as inodes;
 	 * but each new link needs a new dentry, pinning lowmem, and
 	 * tmpfs dentries cannot be pruned until they are unlinked.
+	 * But if an O_TMPFILE file is linked into the tmpfs, the
+	 * first link must skip that, to get the accounting right.
 	 */
-	ret = shmem_reserve_inode(inode->i_sb);
-	if (ret)
-		goto out;
+	if (inode->i_nlink) {
+		ret = shmem_reserve_inode(inode->i_sb);
+		if (ret)
+			goto out;
+	}
 
 	dir->i_size += BOGO_DIRENT_SIZE;
 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
diff --git a/mm/slab.c b/mm/slab.c
index 354a09d..bd498ef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -566,14 +566,6 @@
 
 static void init_arraycache(struct array_cache *ac, int limit, int batch)
 {
-	/*
-	 * The array_cache structures contain pointers to free object.
-	 * However, when such objects are allocated or transferred to another
-	 * cache the pointers are not cleared and they could be counted as
-	 * valid references during a kmemleak scan. Therefore, kmemleak must
-	 * not scan such objects.
-	 */
-	kmemleak_no_scan(ac);
 	if (ac) {
 		ac->avail = 0;
 		ac->limit = limit;
@@ -589,6 +581,14 @@
 	struct array_cache *ac = NULL;
 
 	ac = kmalloc_node(memsize, gfp, node);
+	/*
+	 * The array_cache structures contain pointers to free objects.
+	 * However, when such objects are allocated or transferred to another
+	 * cache the pointers are not cleared and they could be counted as
+	 * valid references during a kmemleak scan. Therefore, kmemleak must
+	 * not scan such objects.
+	 */
+	kmemleak_no_scan(ac);
 	init_arraycache(ac, entries, batchcount);
 	return ac;
 }
@@ -683,6 +683,7 @@
 
 	alc = kmalloc_node(memsize, gfp, node);
 	if (alc) {
+		kmemleak_no_scan(alc);
 		init_arraycache(&alc->ac, entries, batch);
 		spin_lock_init(&alc->lock);
 	}
@@ -1620,11 +1621,8 @@
 		       *dbg_redzone2(cachep, objp));
 	}
 
-	if (cachep->flags & SLAB_STORE_USER) {
-		pr_err("Last user: [<%p>](%pSR)\n",
-		       *dbg_userword(cachep, objp),
-		       *dbg_userword(cachep, objp));
-	}
+	if (cachep->flags & SLAB_STORE_USER)
+		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
 	realobj = (char *)objp + obj_offset(cachep);
 	size = cachep->object_size;
 	for (i = 0; i < size && lines; i += 16, lines--) {
@@ -1657,7 +1655,7 @@
 			/* Mismatch ! */
 			/* Print header */
 			if (lines == 0) {
-				pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
+				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
 				       print_tainted(), cachep->name,
 				       realobj, size);
 				print_objinfo(cachep, objp, 0);
@@ -1686,13 +1684,13 @@
 		if (objnr) {
 			objp = index_to_obj(cachep, page, objnr - 1);
 			realobj = (char *)objp + obj_offset(cachep);
-			pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
+			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
 			print_objinfo(cachep, objp, 2);
 		}
 		if (objnr + 1 < cachep->num) {
 			objp = index_to_obj(cachep, page, objnr + 1);
 			realobj = (char *)objp + obj_offset(cachep);
-			pr_err("Next obj: start=%p, len=%d\n", realobj, size);
+			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
 			print_objinfo(cachep, objp, 2);
 		}
 	}
@@ -2639,7 +2637,7 @@
 	/* Verify double free bug */
 	for (i = page->active; i < cachep->num; i++) {
 		if (get_free_obj(page, i) == objnr) {
-			pr_err("slab: double free detected in cache '%s', objp %p\n",
+			pr_err("slab: double free detected in cache '%s', objp %px\n",
 			       cachep->name, objp);
 			BUG();
 		}
@@ -2802,7 +2800,7 @@
 	else
 		slab_error(cache, "memory outside object was overwritten");
 
-	pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
 	       obj, redzone1, redzone2);
 }
 
@@ -3102,7 +3100,7 @@
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
 			slab_error(cachep, "double free, or memory outside object was overwritten");
-			pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
 			       objp, *dbg_redzone1(cachep, objp),
 			       *dbg_redzone2(cachep, objp));
 		}
@@ -3115,7 +3113,7 @@
 		cachep->ctor(objp);
 	if (ARCH_SLAB_MINALIGN &&
 	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
-		pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
 		       objp, (int)ARCH_SLAB_MINALIGN);
 	}
 	return objp;
@@ -4339,7 +4337,7 @@
 		return;
 	}
 #endif
-	seq_printf(m, "%p", (void *)address);
+	seq_printf(m, "%px", (void *)address);
 }
 
 static int leaks_show(struct seq_file *m, void *p)
diff --git a/mm/swap.c b/mm/swap.c
index 086d7fa..4cffcb4 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -69,6 +69,7 @@
 		del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 	}
+	__ClearPageWaiters(page);
 	mem_cgroup_uncharge(page);
 }
 
@@ -785,6 +786,7 @@
 
 		/* Clear Active bit in case of parallel mark_page_accessed */
 		__ClearPageActive(page);
+		__ClearPageWaiters(page);
 
 		list_add(&page->lru, &pages_to_free);
 	}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 2fcc719..d4f3b2f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -372,6 +372,7 @@
 			/*
 			 * Initiate read into locked page and return.
 			 */
+			SetPageWorkingset(new_page);
 			lru_cache_add_anon(new_page);
 			*new_page_allocated = true;
 			return new_page;
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 7683c22..27ae932 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -61,12 +61,11 @@
 	return GOOD_STACK;
 }
 
-static void report_usercopy(const void *ptr, unsigned long len,
-			    bool to_user, const char *type)
+static void report_usercopy(unsigned long len, bool to_user, const char *type)
 {
-	pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+	pr_emerg("kernel memory %s attempt detected %s '%s' (%lu bytes)\n",
 		to_user ? "exposure" : "overwrite",
-		to_user ? "from" : "to", ptr, type ? : "unknown", len);
+		to_user ? "from" : "to", type ? : "unknown", len);
 	/*
 	 * For greater effect, it would be nice to do do_group_exit(),
 	 * but BUG() actually hooks all the lock-breaking and per-arch
@@ -275,6 +274,6 @@
 		return;
 
 report:
-	report_usercopy(ptr, n, to_user, err);
+	report_usercopy(n, to_user, err);
 }
 EXPORT_SYMBOL(__check_object_size);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0ffab05..add160c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -510,7 +510,11 @@
 	}
 
 found:
-	if (addr + size > vend)
+	/*
+	 * Also check the calculated address against vstart, because a big
+	 * align request can wrap it around to 0.
+	 */
+	if (addr + size > vend || addr < vstart)
 		goto overflow;
 
 	va->va_start = addr;
@@ -2296,7 +2300,7 @@
 	if (!(area->flags & VM_USERMAP))
 		return -EINVAL;
 
-	if (kaddr + size > area->addr + area->size)
+	if (kaddr + size > area->addr + get_vm_area_size(area))
 		return -EINVAL;
 
 	do {
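
The added `addr < vstart` test catches a wrap: with a sufficiently large
alignment request, rounding the candidate address up can overflow and come
back as a small value, even 0, which the old `addr + size > vend` check alone
accepts. A sketch of the failure mode with assumed 64-bit bounds:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

    int main(void)
    {
            uint64_t vstart = 0xffffffc000000000ULL;    /* assumed range */
            uint64_t vend   = 0xffffffffffffffffULL;
            uint64_t align  = 1ULL << 63;               /* huge alignment */
            uint64_t size   = 0x1000;

            /* Aligning an address near the top of the space wraps to 0. */
            uint64_t addr = ALIGN(vstart, align);

            printf("addr = %#llx\n", (unsigned long long)addr);

            /* Old check alone: 0 + size > vend is false, bogus success;
             * the new addr < vstart test catches the wrap. */
            if (addr + size > vend || addr < vstart)
                    puts("overflow detected");
            return 0;
    }
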
diff --git a/mm/vmscan.c b/mm/vmscan.c
index abcc8be..b3c44fc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -47,6 +47,7 @@
 #include <linux/prefetch.h>
 #include <linux/printk.h>
 #include <linux/dax.h>
+#include <linux/psi.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -2094,6 +2095,7 @@
 		}
 
 		ClearPageActive(page);	/* we are de-activating */
+		SetPageWorkingset(page);
 		list_add(&page->lru, &l_inactive);
 	}
 
@@ -3105,6 +3107,7 @@
 {
 	struct zonelist *zonelist;
 	unsigned long nr_reclaimed;
+	unsigned long pflags;
 	int nid;
 	struct scan_control sc = {
 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
@@ -3132,9 +3135,13 @@
 					    sc.gfp_mask,
 					    sc.reclaim_idx);
 
+	psi_memstall_enter(&pflags);
 	current->flags |= PF_MEMALLOC;
+
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+
 	current->flags &= ~PF_MEMALLOC;
+	psi_memstall_leave(&pflags);
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
@@ -3299,6 +3306,7 @@
 	int i;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
+	unsigned long pflags;
 	struct zone *zone;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -3308,6 +3316,8 @@
 		.may_unmap = 1,
 		.may_swap = 1,
 	};
+
+	psi_memstall_enter(&pflags);
 	count_vm_event(PAGEOUTRUN);
 
 	do {
@@ -3401,6 +3411,7 @@
 		pgdat->kswapd_failures++;
 
 out:
+	psi_memstall_leave(&pflags);
 	/*
 	 * Return the order kswapd stopped reclaiming at as
 	 * prepare_kswapd_sleep() takes it into account. If another caller
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 727bc07..98811ce 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -957,6 +957,7 @@
 	"nr_isolated_file",
 	"workingset_refault",
 	"workingset_activate",
+	"workingset_restore",
 	"workingset_nodereclaim",
 	"nr_anon_pages",
 	"nr_mapped",
@@ -1076,13 +1077,8 @@
 #endif
 #endif /* CONFIG_MEMORY_BALLOON */
 #ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
 	"nr_tlb_remote_flush",
 	"nr_tlb_remote_flush_received",
-#else
-	"", /* nr_tlb_remote_flush */
-	"", /* nr_tlb_remote_flush_received */
-#endif /* CONFIG_SMP */
 	"nr_tlb_local_flush_all",
 	"nr_tlb_local_flush_one",
 #endif /* CONFIG_DEBUG_TLBFLUSH */
diff --git a/mm/workingset.c b/mm/workingset.c
index 4c4f056..a697611 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -118,7 +118,7 @@
  * the only thing eating into inactive list space is active pages.
  *
  *
- *		Activating refaulting pages
+ *		Refaulting inactive pages
  *
  * All that is known about the active list is that the pages have been
  * accessed more than once in the past.  This means that at any given
@@ -131,6 +131,10 @@
  * used less frequently than the refaulting page - or even not used at
  * all anymore.
  *
+ * That means if inactive cache is refaulting with a suitable refault
+ * distance, we assume the cache workingset is transitioning and put
+ * pressure on the current active list.
+ *
  * If this is wrong and demotion kicks in, the pages which are truly
  * used more frequently will be reactivated while the less frequently
  * used once will be evicted from memory.
@@ -138,6 +142,14 @@
  * But if this is right, the stale pages will be pushed out of memory
  * and the used pages get to stay in cache.
  *
+ *		Refaulting active pages
+ *
+ * If on the other hand the refaulting pages have recently been
+ * deactivated, it means that the active list is no longer protecting
+ * actively used cache from reclaim. The cache is NOT transitioning to
+ * a different workingset; the existing workingset is thrashing in the
+ * space allocated to the page cache.
+ *
  *
  *		Implementation
  *
@@ -153,8 +165,7 @@
  */
 
 #define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY + \
-			 NODES_SHIFT +	\
-			 MEM_CGROUP_ID_SHIFT)
+			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
 #define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
 
 /*
@@ -167,23 +178,28 @@
  */
 static unsigned int bucket_order __read_mostly;
 
-static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
+static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
+			 bool workingset)
 {
 	eviction >>= bucket_order;
 	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
 	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
+	eviction = (eviction << 1) | workingset;
 	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
 
 	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
 }
 
 static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
-			  unsigned long *evictionp)
+			  unsigned long *evictionp, bool *workingsetp)
 {
 	unsigned long entry = (unsigned long)shadow;
 	int memcgid, nid;
+	bool workingset;
 
 	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
+	workingset = entry & 1;
+	entry >>= 1;
 	nid = entry & ((1UL << NODES_SHIFT) - 1);
 	entry >>= NODES_SHIFT;
 	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
@@ -192,6 +208,7 @@
 	*memcgidp = memcgid;
 	*pgdat = NODE_DATA(nid);
 	*evictionp = entry << bucket_order;
+	*workingsetp = workingset;
 }
 
 /**
@@ -204,8 +221,8 @@
  */
 void *workingset_eviction(struct address_space *mapping, struct page *page)
 {
-	struct mem_cgroup *memcg = page_memcg(page);
 	struct pglist_data *pgdat = page_pgdat(page);
+	struct mem_cgroup *memcg = page_memcg(page);
 	int memcgid = mem_cgroup_id(memcg);
 	unsigned long eviction;
 	struct lruvec *lruvec;
@@ -217,30 +234,30 @@
 
 	lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	eviction = atomic_long_inc_return(&lruvec->inactive_age);
-	return pack_shadow(memcgid, pgdat, eviction);
+	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
 }
 
 /**
  * workingset_refault - evaluate the refault of a previously evicted page
+ * @page: the freshly allocated replacement page
  * @shadow: shadow entry of the evicted page
  *
  * Calculates and evaluates the refault distance of the previously
  * evicted page in the context of the node it was allocated in.
- *
- * Returns %true if the page should be activated, %false otherwise.
  */
-bool workingset_refault(void *shadow)
+void workingset_refault(struct page *page, void *shadow)
 {
 	unsigned long refault_distance;
+	struct pglist_data *pgdat;
 	unsigned long active_file;
 	struct mem_cgroup *memcg;
 	unsigned long eviction;
 	struct lruvec *lruvec;
 	unsigned long refault;
-	struct pglist_data *pgdat;
+	bool workingset;
 	int memcgid;
 
-	unpack_shadow(shadow, &memcgid, &pgdat, &eviction);
+	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
 
 	rcu_read_lock();
 	/*
@@ -260,40 +277,51 @@
 	 * configurations instead.
 	 */
 	memcg = mem_cgroup_from_id(memcgid);
-	if (!mem_cgroup_disabled() && !memcg) {
-		rcu_read_unlock();
-		return false;
-	}
+	if (!mem_cgroup_disabled() && !memcg)
+		goto out;
 	lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	refault = atomic_long_read(&lruvec->inactive_age);
 	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);
-	rcu_read_unlock();
 
 	/*
-	 * The unsigned subtraction here gives an accurate distance
-	 * across inactive_age overflows in most cases.
+	 * Calculate the refault distance
 	 *
-	 * There is a special case: usually, shadow entries have a
-	 * short lifetime and are either refaulted or reclaimed along
-	 * with the inode before they get too old.  But it is not
-	 * impossible for the inactive_age to lap a shadow entry in
-	 * the field, which can then can result in a false small
-	 * refault distance, leading to a false activation should this
-	 * old entry actually refault again.  However, earlier kernels
-	 * used to deactivate unconditionally with *every* reclaim
-	 * invocation for the longest time, so the occasional
-	 * inappropriate activation leading to pressure on the active
-	 * list is not a problem.
+	 * The unsigned subtraction here gives an accurate distance
+	 * across inactive_age overflows in most cases. There is a
+	 * special case: usually, shadow entries have a short lifetime
+	 * and are either refaulted or reclaimed along with the inode
+	 * before they get too old.  But it is not impossible for the
+	 * inactive_age to lap a shadow entry in the field, which can
+	 * then result in a false small refault distance, leading to a
+	 * false activation should this old entry actually refault
+	 * again.  However, earlier kernels used to deactivate
+	 * unconditionally with *every* reclaim invocation for the
+	 * longest time, so the occasional inappropriate activation
+	 * leading to pressure on the active list is not a problem.
 	 */
 	refault_distance = (refault - eviction) & EVICTION_MASK;
 
 	inc_node_state(pgdat, WORKINGSET_REFAULT);
 
-	if (refault_distance <= active_file) {
-		inc_node_state(pgdat, WORKINGSET_ACTIVATE);
-		return true;
+	/*
+	 * Compare the distance to the existing workingset size. We
+	 * don't act on pages that couldn't stay resident even if all
+	 * the memory was available to the page cache.
+	 */
+	if (refault_distance > active_file)
+		goto out;
+
+	SetPageActive(page);
+	atomic_long_inc(&lruvec->inactive_age);
+	inc_node_state(pgdat, WORKINGSET_ACTIVATE);
+
+	/* Page was active prior to eviction */
+	if (workingset) {
+		SetPageWorkingset(page);
+		inc_node_state(pgdat, WORKINGSET_RESTORE);
 	}
-	return false;
+out:
+	rcu_read_unlock();
 }
 
 /**
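The unsigned subtraction described in the relocated comment above is easy to sanity-check in isolation. A minimal user-space sketch, with EVICTION_MASK assumed to be the full word here (the kernel narrows it by the bits packed into the shadow entry):

#include <stdio.h>

#define EVICTION_MASK (~0UL)	/* assumption: full word, see note above */

int main(void)
{
	unsigned long eviction = ~0UL - 2;	/* counter just before wrapping */
	unsigned long refault = 5;		/* counter shortly after it */

	/* modular arithmetic recovers the true distance of 8 */
	printf("%lu\n", (refault - eviction) & EVICTION_MASK);
	return 0;
}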
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index fb3e2a5..d06d15db 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -366,10 +366,12 @@
 	ifrr.ifr_ifru = ifr->ifr_ifru;
 
 	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		if (!net_eq(dev_net(dev), &init_net))
+			break;
 	case SIOCGMIIPHY:
 	case SIOCGMIIREG:
 	case SIOCSMIIREG:
-	case SIOCSHWTSTAMP:
 	case SIOCGHWTSTAMP:
 		if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
 			err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
diff --git a/net/9p/client.c b/net/9p/client.c
index 142afe7..f1517ca 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1058,7 +1058,7 @@
 		p9_debug(P9_DEBUG_ERROR,
 			 "Please specify a msize of at least 4k\n");
 		err = -EINVAL;
-		goto free_client;
+		goto close_trans;
 	}
 
 	err = p9_client_version(clnt);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 145f805..7f1b45c 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -570,9 +570,10 @@
 	if (ret) {
 		p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
 		trace_9p_protocol_dump(clnt, &fake_pdu);
+		return ret;
 	}
 
-	return ret;
+	return fake_pdu.offset;
 }
 EXPORT_SYMBOL(p9stat_read);
 
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index af46bc4..b5f84f4 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -293,7 +293,7 @@
 	goto out;
 }
 
-void __exit atalk_proc_exit(void)
+void atalk_proc_exit(void)
 {
 	remove_proc_entry("interface", atalk_proc_dir);
 	remove_proc_entry("route", atalk_proc_dir);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 10d2bdc..e206d98 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1912,12 +1912,16 @@
 /* Called by proto.c on kernel start up */
 static int __init atalk_init(void)
 {
-	int rc = proto_register(&ddp_proto, 0);
+	int rc;
 
-	if (rc != 0)
+	rc = proto_register(&ddp_proto, 0);
+	if (rc)
 		goto out;
 
-	(void)sock_register(&atalk_family_ops);
+	rc = sock_register(&atalk_family_ops);
+	if (rc)
+		goto out_proto;
+
 	ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
 	if (!ddp_dl)
 		printk(atalk_err_snap);
@@ -1925,12 +1929,33 @@
 	dev_add_pack(&ltalk_packet_type);
 	dev_add_pack(&ppptalk_packet_type);
 
-	register_netdevice_notifier(&ddp_notifier);
+	rc = register_netdevice_notifier(&ddp_notifier);
+	if (rc)
+		goto out_sock;
+
 	aarp_proto_init();
-	atalk_proc_init();
-	atalk_register_sysctl();
+	rc = atalk_proc_init();
+	if (rc)
+		goto out_aarp;
+
+	rc = atalk_register_sysctl();
+	if (rc)
+		goto out_proc;
 out:
 	return rc;
+out_proc:
+	atalk_proc_exit();
+out_aarp:
+	aarp_cleanup_module();
+	unregister_netdevice_notifier(&ddp_notifier);
+out_sock:
+	dev_remove_pack(&ppptalk_packet_type);
+	dev_remove_pack(&ltalk_packet_type);
+	unregister_snap_client(ddp_dl);
+	sock_unregister(PF_APPLETALK);
+out_proto:
+	proto_unregister(&ddp_proto);
+	goto out;
 }
 module_init(atalk_init);
 
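The reworked atalk_init() above is an instance of the kernel's acquire-in-order, release-in-reverse unwind idiom. A generic sketch of the shape, where step_*() and teardown_*() are hypothetical stand-ins rather than AppleTalk functions:

/* One label per acquired resource: each failure path releases exactly
 * what the earlier steps set up, in reverse order.
 */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void teardown_a(void) { }
static void teardown_b(void) { }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto undo_a;
	err = step_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
out:
	return err;
}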
diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index ebb8643..4e6042e 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -44,9 +44,12 @@
 
 static struct ctl_table_header *atalk_table_header;
 
-void atalk_register_sysctl(void)
+int __init atalk_register_sysctl(void)
 {
 	atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
+	if (!atalk_table_header)
+		return -ENOMEM;
+	return 0;
 }
 
 void atalk_unregister_sysctl(void)
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 1e84c52..704892d 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -721,7 +721,10 @@
 
 static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
 {
-	if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+	if (arg < 0 || arg >= MAX_LEC_ITF)
+		return -EINVAL;
+	arg = array_index_nospec(arg, MAX_LEC_ITF);
+	if (!dev_lec[arg])
 		return -EINVAL;
 	vcc->proto_data = dev_lec[arg];
 	return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -739,6 +742,7 @@
 		i = arg;
 	if (arg >= MAX_LEC_ITF)
 		return -EINVAL;
+	i = array_index_nospec(arg, MAX_LEC_ITF);
 	if (!dev_lec[i]) {
 		int size;
 
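array_index_nospec() is the helper from <linux/nospec.h> that clamps an already bounds-checked index so a mispredicted branch cannot be used to read dev_lec[] out of bounds speculatively. A simplified model of its contract (the real macro derives the mask without a branch, which is the point under speculation):

#include <stddef.h>

/* Model only: an in-range index passes through unchanged; an
 * out-of-range one is forced to 0 instead of reaching the array.
 */
static inline size_t index_nospec_model(size_t idx, size_t size)
{
	size_t mask = (idx < size) ? ~(size_t)0 : (size_t)0;

	return idx & mask;
}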
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 8b6f654..0012306 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -802,6 +802,8 @@
 				 const u8 *mac, const unsigned short vid)
 {
 	struct batadv_bla_claim search_claim, *claim;
+	struct batadv_bla_claim *claim_removed_entry;
+	struct hlist_node *claim_removed_node;
 
 	ether_addr_copy(search_claim.addr, mac);
 	search_claim.vid = vid;
@@ -812,10 +814,18 @@
 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
 		   mac, BATADV_PRINT_VID(vid));
 
-	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
-			   batadv_choose_claim, claim);
-	batadv_claim_put(claim); /* reference from the hash is gone */
+	claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+						batadv_compare_claim,
+						batadv_choose_claim, claim);
+	if (!claim_removed_node)
+		goto free_claim;
 
+	/* reference from the hash is gone */
+	claim_removed_entry = hlist_entry(claim_removed_node,
+					  struct batadv_bla_claim, hash_entry);
+	batadv_claim_put(claim_removed_entry);
+
+free_claim:
 	/* don't need the reference from hash_find() anymore */
 	batadv_claim_put(claim);
 }
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b9f9a31..af4a02a 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -615,14 +615,26 @@
 				  struct batadv_tt_global_entry *tt_global,
 				  const char *message)
 {
+	struct batadv_tt_global_entry *tt_removed_entry;
+	struct hlist_node *tt_removed_node;
+
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Deleting global tt entry %pM (vid: %d): %s\n",
 		   tt_global->common.addr,
 		   BATADV_PRINT_VID(tt_global->common.vid), message);
 
-	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
-			   batadv_choose_tt, &tt_global->common);
-	batadv_tt_global_entry_put(tt_global);
+	tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+					     batadv_compare_tt,
+					     batadv_choose_tt,
+					     &tt_global->common);
+	if (!tt_removed_node)
+		return;
+
+	/* drop reference of removed hash entry */
+	tt_removed_entry = hlist_entry(tt_removed_node,
+				       struct batadv_tt_global_entry,
+				       common.hash_entry);
+	batadv_tt_global_entry_put(tt_removed_entry);
 }
 
 /**
@@ -1308,9 +1320,10 @@
 			   unsigned short vid, const char *message,
 			   bool roaming)
 {
+	struct batadv_tt_local_entry *tt_removed_entry;
 	struct batadv_tt_local_entry *tt_local_entry;
 	u16 flags, curr_flags = BATADV_NO_FLAGS;
-	void *tt_entry_exists;
+	struct hlist_node *tt_removed_node;
 
 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
 	if (!tt_local_entry)
@@ -1339,15 +1352,18 @@
 	 */
 	batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
 
-	tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+	tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
 					     batadv_compare_tt,
 					     batadv_choose_tt,
 					     &tt_local_entry->common);
-	if (!tt_entry_exists)
+	if (!tt_removed_node)
 		goto out;
 
-	/* extra call to free the local tt entry */
-	batadv_tt_local_entry_put(tt_local_entry);
+	/* drop reference of removed hash entry */
+	tt_removed_entry = hlist_entry(tt_removed_node,
+				       struct batadv_tt_local_entry,
+				       common.hash_entry);
+	batadv_tt_local_entry_put(tt_removed_entry);
 
 out:
 	if (tt_local_entry)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index cc06149..fe4fb0c 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1165,6 +1165,14 @@
 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
 		return 0;
 
+	/* The minimum encryption key size needs to be enforced by the
+	 * host stack before establishing any L2CAP connections. The
+	 * specification in theory allows a minimum of 1, but to align
+	 * BR/EDR and LE transports, a minimum of 7 is chosen.
+	 */
+	if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE)
+		return 0;
+
 	return 1;
 }
 
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index c88a600..ca18369 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -826,8 +826,6 @@
 	if (!sk)
 		return 0;
 
-	hdev = hci_pi(sk)->hdev;
-
 	switch (hci_pi(sk)->channel) {
 	case HCI_CHANNEL_MONITOR:
 		atomic_dec(&monitor_promisc);
@@ -849,6 +847,7 @@
 
 	bt_sock_unlink(&hci_sk_list, sk);
 
+	hdev = hci_pi(sk)->hdev;
 	if (hdev) {
 		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
 			/* When releasing an user channel exclusive access,
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 008ba43..cc80c761 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -76,6 +76,7 @@
 			sockfd_put(csock);
 			return err;
 		}
+		ca.name[sizeof(ca.name)-1] = 0;
 
 		err = hidp_connection_add(&ca, csock, isock);
 		if (!err && copy_to_user(argp, &ca, sizeof(ca)))
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 1fc23cb..d49aa4e 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3326,16 +3326,22 @@
 
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
+		if (len < 0)
+			break;
 
 		hint  = type & L2CAP_CONF_HINT;
 		type &= L2CAP_CONF_MASK;
 
 		switch (type) {
 		case L2CAP_CONF_MTU:
+			if (olen != 2)
+				break;
 			mtu = val;
 			break;
 
 		case L2CAP_CONF_FLUSH_TO:
+			if (olen != 2)
+				break;
 			chan->flush_to = val;
 			break;
 
@@ -3343,26 +3349,30 @@
 			break;
 
 		case L2CAP_CONF_RFC:
-			if (olen == sizeof(rfc))
-				memcpy(&rfc, (void *) val, olen);
+			if (olen != sizeof(rfc))
+				break;
+			memcpy(&rfc, (void *) val, olen);
 			break;
 
 		case L2CAP_CONF_FCS:
+			if (olen != 1)
+				break;
 			if (val == L2CAP_FCS_NONE)
 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
 			break;
 
 		case L2CAP_CONF_EFS:
-			if (olen == sizeof(efs)) {
-				remote_efs = 1;
-				memcpy(&efs, (void *) val, olen);
-			}
+			if (olen != sizeof(efs))
+				break;
+			remote_efs = 1;
+			memcpy(&efs, (void *) val, olen);
 			break;
 
 		case L2CAP_CONF_EWS:
+			if (olen != 2)
+				break;
 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
 				return -ECONNREFUSED;
-
 			set_bit(FLAG_EXT_CTRL, &chan->flags);
 			set_bit(CONF_EWS_RECV, &chan->conf_state);
 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
@@ -3372,7 +3382,6 @@
 		default:
 			if (hint)
 				break;
-
 			result = L2CAP_CONF_UNKNOWN;
 			*((u8 *) ptr++) = type;
 			break;
@@ -3537,58 +3546,65 @@
 
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+		if (len < 0)
+			break;
 
 		switch (type) {
 		case L2CAP_CONF_MTU:
+			if (olen != 2)
+				break;
 			if (val < L2CAP_DEFAULT_MIN_MTU) {
 				*result = L2CAP_CONF_UNACCEPT;
 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
 			} else
 				chan->imtu = val;
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
+					   endptr - ptr);
 			break;
 
 		case L2CAP_CONF_FLUSH_TO:
+			if (olen != 2)
+				break;
 			chan->flush_to = val;
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
-					   2, chan->flush_to, endptr - ptr);
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
+					   chan->flush_to, endptr - ptr);
 			break;
 
 		case L2CAP_CONF_RFC:
-			if (olen == sizeof(rfc))
-				memcpy(&rfc, (void *)val, olen);
-
+			if (olen != sizeof(rfc))
+				break;
+			memcpy(&rfc, (void *)val, olen);
 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
 			    rfc.mode != chan->mode)
 				return -ECONNREFUSED;
-
 			chan->fcs = 0;
-
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+					   (unsigned long) &rfc, endptr - ptr);
 			break;
 
 		case L2CAP_CONF_EWS:
+			if (olen != 2)
+				break;
 			chan->ack_win = min_t(u16, val, chan->ack_win);
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
 					   chan->tx_win, endptr - ptr);
 			break;
 
 		case L2CAP_CONF_EFS:
-			if (olen == sizeof(efs)) {
-				memcpy(&efs, (void *)val, olen);
-
-				if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
-				    efs.stype != L2CAP_SERV_NOTRAFIC &&
-				    efs.stype != chan->local_stype)
-					return -ECONNREFUSED;
-
-				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
-						   (unsigned long) &efs, endptr - ptr);
-			}
+			if (olen != sizeof(efs))
+				break;
+			memcpy(&efs, (void *)val, olen);
+			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+			    efs.stype != L2CAP_SERV_NOTRAFIC &&
+			    efs.stype != chan->local_stype)
+				return -ECONNREFUSED;
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+					   (unsigned long) &efs, endptr - ptr);
 			break;
 
 		case L2CAP_CONF_FCS:
+			if (olen != 1)
+				break;
 			if (*result == L2CAP_CONF_PENDING)
 				if (val == L2CAP_FCS_NONE)
 					set_bit(CONF_RECV_NO_FCS,
@@ -3717,13 +3733,18 @@
 
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+		if (len < 0)
+			break;
 
 		switch (type) {
 		case L2CAP_CONF_RFC:
-			if (olen == sizeof(rfc))
-				memcpy(&rfc, (void *)val, olen);
+			if (olen != sizeof(rfc))
+				break;
+			memcpy(&rfc, (void *)val, olen);
 			break;
 		case L2CAP_CONF_EWS:
+			if (olen != 2)
+				break;
 			txwin_ext = val;
 			break;
 		}
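Every hunk in this file applies the same defensive rule: an option's value is consumed only when olen matches the exact length its type requires. A stand-alone sketch of that rule, with a made-up TLV type and helper rather than the real L2CAP structures:

struct tlv {
	unsigned char type;
	unsigned char len;
	const unsigned char *val;
};

/* Accept a 2-byte little-endian option only at its exact length;
 * anything shorter or longer is treated as malformed and skipped, so
 * a stale or attacker-chosen value never reaches the caller.
 */
static int opt_get_u16(const struct tlv *opt, unsigned int *out)
{
	if (opt->len != 2)
		return -1;
	*out = opt->val[0] | ((unsigned int)opt->val[1] << 8);
	return 0;
}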
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f57de0a..2e05a79 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -519,13 +519,15 @@
 	call_netdevice_notifiers(NETDEV_JOIN, dev);
 
 	err = dev_set_allmulti(dev, 1);
-	if (err)
-		goto put_back;
+	if (err) {
+		kfree(p);	/* kobject not yet init'd, manually free */
+		goto err1;
+	}
 
 	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
 				   SYSFS_BRIDGE_PORT_ATTR);
 	if (err)
-		goto err1;
+		goto err2;
 
 	err = br_sysfs_addif(p);
 	if (err)
@@ -608,12 +610,9 @@
 	sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
 	kobject_put(&p->kobj);
-	p = NULL; /* kobject_put frees */
-err1:
 	dev_set_allmulti(dev, -1);
-put_back:
+err1:
 	dev_put(dev);
-	kfree(p);
 	return err;
 }
 
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 267b46a..c615dff 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -231,13 +231,10 @@
 /* note: already called with rcu_read_lock */
 static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
 	__br_handle_local_finish(skb);
 
-	BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
-	br_pass_frame_up(skb);
-	return 0;
+	/* return 1 to signal the okfn() was called so it's ok to use the skb */
+	return 1;
 }
 
 /*
@@ -308,10 +305,18 @@
 				goto forward;
 		}
 
-		/* Deliver packet to local host only */
-		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
-			NULL, skb, skb->dev, NULL, br_handle_local_finish);
-		return RX_HANDLER_CONSUMED;
+		/* The else clause should be hit when nf_hook():
+		 *   - returns < 0 (drop/error)
+		 *   - returns = 0 (stolen/nf_queue)
+		 * Thus return 1 from the okfn() to signal the skb is ok to pass
+		 */
+		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+			    dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+			    br_handle_local_finish) == 1) {
+			return RX_HANDLER_PASS;
+		} else {
+			return RX_HANDLER_CONSUMED;
+		}
 	}
 
 forward:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2136e45..964ffff 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1983,7 +1983,8 @@
 
 	__br_multicast_open(br, query);
 
-	list_for_each_entry(port, &br->port_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(port, &br->port_list, list) {
 		if (port->state == BR_STATE_DISABLED ||
 		    port->state == BR_STATE_BLOCKING)
 			continue;
@@ -1995,6 +1996,7 @@
 			br_multicast_enable(&port->ip6_own_query);
 #endif
 	}
+	rcu_read_unlock();
 }
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 7e42c0d..0c96773 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -512,6 +512,7 @@
 	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
 
 	skb->protocol = htons(ETH_P_IP);
+	skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
 
 	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
 		skb->dev, NULL,
@@ -878,11 +879,6 @@
 	.br_dev_xmit_hook =	br_nf_dev_xmit,
 };
 
-void br_netfilter_enable(void)
-{
-}
-EXPORT_SYMBOL_GPL(br_netfilter_enable);
-
 /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
  * br_dev_queue_push_xmit is called afterwards */
 static struct nf_hook_ops br_nf_ops[] __read_mostly = {
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index a1b57cb..8c08dd0 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -235,6 +235,8 @@
 	nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
 
 	skb->protocol = htons(ETH_P_IPV6);
+	skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+
 	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
 		skb->dev, NULL,
 		br_nf_pre_routing_finish_ipv6);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index c7e5aaf..142ccaa 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2056,7 +2056,8 @@
 		if (match_kern)
 			match_kern->match_size = ret;
 
-		if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+		/* rule should have no remaining data after target */
+		if (type == EBT_COMPAT_TARGET && size_left)
 			return -EINVAL;
 
 		match32 = (struct compat_ebt_entry_mwt *) buf;
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index f5afda1..4dc82e9 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -352,15 +352,14 @@
 	u8 cmdrsp;
 	u8 cmd;
 	int ret = -1;
-	u16 tmp16;
 	u8 len;
 	u8 param[255];
-	u8 linkid;
+	u8 linkid = 0;
 	struct cfctrl *cfctrl = container_obj(layer);
 	struct cfctrl_request_info rsp, *req;
 
 
-	cfpkt_extr_head(pkt, &cmdrsp, 1);
+	cmdrsp = cfpkt_extr_head_u8(pkt);
 	cmd = cmdrsp & CFCTRL_CMD_MASK;
 	if (cmd != CFCTRL_CMD_LINK_ERR
 	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
@@ -378,13 +377,12 @@
 			u8 physlinkid;
 			u8 prio;
 			u8 tmp;
-			u32 tmp32;
 			u8 *cp;
 			int i;
 			struct cfctrl_link_param linkparam;
 			memset(&linkparam, 0, sizeof(linkparam));
 
-			cfpkt_extr_head(pkt, &tmp, 1);
+			tmp = cfpkt_extr_head_u8(pkt);
 
 			serv = tmp & CFCTRL_SRV_MASK;
 			linkparam.linktype = serv;
@@ -392,13 +390,13 @@
 			servtype = tmp >> 4;
 			linkparam.chtype = servtype;
 
-			cfpkt_extr_head(pkt, &tmp, 1);
+			tmp = cfpkt_extr_head_u8(pkt);
 			physlinkid = tmp & 0x07;
 			prio = tmp >> 3;
 
 			linkparam.priority = prio;
 			linkparam.phyid = physlinkid;
-			cfpkt_extr_head(pkt, &endpoint, 1);
+			endpoint = cfpkt_extr_head_u8(pkt);
 			linkparam.endpoint = endpoint & 0x03;
 
 			switch (serv) {
@@ -407,45 +405,43 @@
 				if (CFCTRL_ERR_BIT & cmdrsp)
 					break;
 				/* Link ID */
-				cfpkt_extr_head(pkt, &linkid, 1);
+				linkid = cfpkt_extr_head_u8(pkt);
 				break;
 			case CFCTRL_SRV_VIDEO:
-				cfpkt_extr_head(pkt, &tmp, 1);
+				tmp = cfpkt_extr_head_u8(pkt);
 				linkparam.u.video.connid = tmp;
 				if (CFCTRL_ERR_BIT & cmdrsp)
 					break;
 				/* Link ID */
-				cfpkt_extr_head(pkt, &linkid, 1);
+				linkid = cfpkt_extr_head_u8(pkt);
 				break;
 
 			case CFCTRL_SRV_DATAGRAM:
-				cfpkt_extr_head(pkt, &tmp32, 4);
 				linkparam.u.datagram.connid =
-				    le32_to_cpu(tmp32);
+				    cfpkt_extr_head_u32(pkt);
 				if (CFCTRL_ERR_BIT & cmdrsp)
 					break;
 				/* Link ID */
-				cfpkt_extr_head(pkt, &linkid, 1);
+				linkid = cfpkt_extr_head_u8(pkt);
 				break;
 			case CFCTRL_SRV_RFM:
 				/* Construct a frame, convert
 				 * DatagramConnectionID
 				 * to network format long and copy it out...
 				 */
-				cfpkt_extr_head(pkt, &tmp32, 4);
 				linkparam.u.rfm.connid =
-				  le32_to_cpu(tmp32);
+				    cfpkt_extr_head_u32(pkt);
 				cp = (u8 *) linkparam.u.rfm.volume;
-				for (cfpkt_extr_head(pkt, &tmp, 1);
+				for (tmp = cfpkt_extr_head_u8(pkt);
 				     cfpkt_more(pkt) && tmp != '\0';
-				     cfpkt_extr_head(pkt, &tmp, 1))
+				     tmp = cfpkt_extr_head_u8(pkt))
 					*cp++ = tmp;
 				*cp = '\0';
 
 				if (CFCTRL_ERR_BIT & cmdrsp)
 					break;
 				/* Link ID */
-				cfpkt_extr_head(pkt, &linkid, 1);
+				linkid = cfpkt_extr_head_u8(pkt);
 
 				break;
 			case CFCTRL_SRV_UTIL:
@@ -454,13 +450,11 @@
 				 * to network format long and copy it out...
 				 */
 				/* Fifosize KB */
-				cfpkt_extr_head(pkt, &tmp16, 2);
 				linkparam.u.utility.fifosize_kb =
-				    le16_to_cpu(tmp16);
+				    cfpkt_extr_head_u16(pkt);
 				/* Fifosize bufs */
-				cfpkt_extr_head(pkt, &tmp16, 2);
 				linkparam.u.utility.fifosize_bufs =
-				    le16_to_cpu(tmp16);
+				    cfpkt_extr_head_u16(pkt);
 				/* name */
 				cp = (u8 *) linkparam.u.utility.name;
 				caif_assert(sizeof(linkparam.u.utility.name)
@@ -468,24 +462,24 @@
 				for (i = 0;
 				     i < UTILITY_NAME_LENGTH
 				     && cfpkt_more(pkt); i++) {
-					cfpkt_extr_head(pkt, &tmp, 1);
+					tmp = cfpkt_extr_head_u8(pkt);
 					*cp++ = tmp;
 				}
 				/* Length */
-				cfpkt_extr_head(pkt, &len, 1);
+				len = cfpkt_extr_head_u8(pkt);
 				linkparam.u.utility.paramlen = len;
 				/* Param Data */
 				cp = linkparam.u.utility.params;
 				while (cfpkt_more(pkt) && len--) {
-					cfpkt_extr_head(pkt, &tmp, 1);
+					tmp = cfpkt_extr_head_u8(pkt);
 					*cp++ = tmp;
 				}
 				if (CFCTRL_ERR_BIT & cmdrsp)
 					break;
 				/* Link ID */
-				cfpkt_extr_head(pkt, &linkid, 1);
+				linkid = cfpkt_extr_head_u8(pkt);
 				/* Length */
-				cfpkt_extr_head(pkt, &len, 1);
+				len = cfpkt_extr_head_u8(pkt);
 				/* Param Data */
 				cfpkt_extr_head(pkt, &param, len);
 				break;
@@ -522,7 +516,7 @@
 		}
 		break;
 	case CFCTRL_CMD_LINK_DESTROY:
-		cfpkt_extr_head(pkt, &linkid, 1);
+		linkid = cfpkt_extr_head_u8(pkt);
 		cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
 		break;
 	case CFCTRL_CMD_LINK_ERR:
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 464e885..bf0294c 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -699,7 +699,6 @@
 }
 EXPORT_SYMBOL(__ceph_open_session);
 
-
 int ceph_open_session(struct ceph_client *client)
 {
 	int ret;
@@ -715,6 +714,23 @@
 }
 EXPORT_SYMBOL(ceph_open_session);
 
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+				unsigned long timeout)
+{
+	u64 newest_epoch;
+	int ret;
+
+	ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
+	if (ret)
+		return ret;
+
+	if (client->osdc.osdmap->epoch >= newest_epoch)
+		return 0;
+
+	ceph_osdc_maybe_request_map(&client->osdc);
+	return ceph_monc_wait_osdmap(&client->monc, newest_epoch, timeout);
+}
+EXPORT_SYMBOL(ceph_wait_for_latest_osdmap);
 
 static int __init init_ceph_lib(void)
 {
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 5004810..288c1fc 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -914,6 +914,15 @@
 	mutex_unlock(&monc->mutex);
 
 	ret = wait_generic_request(req);
+	if (!ret)
+		/*
+		 * Make sure we have the osdmap that includes the blacklist
+		 * entry.  This is needed to ensure that the OSDs pick up the
+		 * new blacklist before processing any future requests from
+		 * this client.
+		 */
+		ret = ceph_wait_for_latest_osdmap(monc->client, 0);
+
 out:
 	put_generic_request(req);
 	return ret;
diff --git a/net/core/dev.c b/net/core/dev.c
index 77dbbfc..3db31ba 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7631,7 +7631,7 @@
 
 		refcnt = netdev_refcnt_read(dev);
 
-		if (time_after(jiffies, warning_time + 10 * HZ)) {
+		if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
 				 dev->name, refcnt);
 			warning_time = jiffies;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a8a9938..20ae57f 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1801,17 +1801,22 @@
 
 	gstrings.len = ret;
 
-	data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
-	if (!data)
-		return -ENOMEM;
+	if (gstrings.len) {
+		data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
+		if (!data)
+			return -ENOMEM;
 
-	__ethtool_get_strings(dev, gstrings.string_set, data);
+		__ethtool_get_strings(dev, gstrings.string_set, data);
+	} else {
+		data = NULL;
+	}
 
 	ret = -EFAULT;
 	if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
 		goto out;
 	useraddr += sizeof(gstrings);
-	if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+	if (gstrings.len &&
+	    copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
 		goto out;
 	ret = 0;
 
@@ -1899,17 +1904,21 @@
 		return -EFAULT;
 
 	stats.n_stats = n_stats;
-	data = kmalloc(n_stats * sizeof(u64), GFP_USER);
-	if (!data)
-		return -ENOMEM;
+	if (n_stats) {
+		data = kmalloc(n_stats * sizeof(u64), GFP_USER);
+		if (!data)
+			return -ENOMEM;
 
-	ops->get_ethtool_stats(dev, &stats, data);
+		ops->get_ethtool_stats(dev, &stats, data);
+	} else {
+		data = NULL;
+	}
 
 	ret = -EFAULT;
 	if (copy_to_user(useraddr, &stats, sizeof(stats)))
 		goto out;
 	useraddr += sizeof(stats);
-	if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+	if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
 		goto out;
 	ret = 0;
 
@@ -1938,19 +1947,23 @@
 		return -EFAULT;
 
 	stats.n_stats = n_stats;
-	data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
-	if (!data)
-		return -ENOMEM;
+	if (n_stats) {
+		data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
+		if (!data)
+			return -ENOMEM;
 
-	mutex_lock(&phydev->lock);
-	phydev->drv->get_stats(phydev, &stats, data);
-	mutex_unlock(&phydev->lock);
+		mutex_lock(&phydev->lock);
+		phydev->drv->get_stats(phydev, &stats, data);
+		mutex_unlock(&phydev->lock);
+	} else {
+		data = NULL;
+	}
 
 	ret = -EFAULT;
 	if (copy_to_user(useraddr, &stats, sizeof(stats)))
 		goto out;
 	useraddr += sizeof(stats);
-	if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+	if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
 		goto out;
 	ret = 0;
 
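All three hunks above guard the same two spots, the allocation and the copy-out, so a zero count never reaches the allocator (where a zero-sized request yields ZERO_SIZE_PTR or NULL) or copy_to_user(). A user-space analogue of the guard, with export_stats() as an illustrative stand-in:

#include <stdlib.h>
#include <string.h>

/* Skip both the allocation and the copy when the reported count is
 * zero, instead of relying on what a zero-sized allocation returns.
 */
static int export_stats(unsigned int n, unsigned long long *dst)
{
	unsigned long long *buf = NULL;

	if (n) {
		buf = calloc(n, sizeof(*buf));
		if (!buf)
			return -1;
		/* ...fill buf from the driver here... */
		memcpy(dst, buf, n * sizeof(*buf));
	}

	free(buf);	/* free(NULL) is a no-op */
	return 0;
}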
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 31c4041..268e320 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -486,9 +486,10 @@
 		rule->uid_range = fib_kuid_range_unset;
 	}
 
-	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
-	    rule_exists(ops, frh, tb, rule)) {
-		err = -EEXIST;
+	if (rule_exists(ops, frh, tb, rule)) {
+		err = 0;
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			err = -EEXIST;
 		goto errout_free;
 	}
 
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 04fd04c..4509dec 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -282,6 +282,7 @@
 
 	atomic_set(&net->count, 1);
 	atomic_set(&net->passive, 1);
+	get_random_bytes(&net->hash_mix, sizeof(u32));
 	net->dev_base_seq = 1;
 	net->user_ns = user_ns;
 	idr_init(&net->netns_ids);
diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c
index 6bc3d7a..7b872a1 100644
--- a/net/core/sockev_nlmcast.c
+++ b/net/core/sockev_nlmcast.c
@@ -72,11 +72,13 @@
 
 	sock = (struct socket *)data;
 	if (!socknlmsgsk || !sock)
-		goto done;
+		goto sk_null;
 
 	sk = sock->sk;
 	if (!sk)
-		goto done;
+		goto sk_null;
+
+	sock_hold(sk);
 
 	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
 		goto done;
@@ -109,6 +111,8 @@
 	smsg->skflags = sk->sk_flags;
 	nlmsg_notify(socknlmsgsk, skb, 0, SKNLGRP_SOCKEV, 0, GFP_KERNEL);
 done:
+	sock_put(sk);
+sk_null:
 	return 0;
 }
 
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 28ad6f1..1d6d3aa 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -596,13 +596,7 @@
 	if (inet_csk_reqsk_queue_is_full(sk))
 		goto drop;
 
-	/*
-	 * Accept backlog is full. If we have already queued enough
-	 * of warm entries in syn queue, drop request. It is better than
-	 * clogging syn queue with openreqs with exponentially increasing
-	 * timeout.
-	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk))
 		goto drop;
 
 	req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6cbcf39..87c513b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -328,7 +328,7 @@
 	if (inet_csk_reqsk_queue_is_full(sk))
 		goto drop;
 
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk))
 		goto drop;
 
 	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
@@ -431,8 +431,8 @@
 		newnp->ipv6_mc_list = NULL;
 		newnp->ipv6_ac_list = NULL;
 		newnp->ipv6_fl_list = NULL;
-		newnp->mcast_oif   = inet6_iif(skb);
-		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
+		newnp->mcast_oif   = inet_iif(skb);
+		newnp->mcast_hops  = ip_hdr(skb)->ttl;
 
 		/*
 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 16737cd..52694cb 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -94,9 +94,8 @@
 			&& (old_operstate != IF_OPER_UP)) {
 		/* Went up */
 		hsr->announce_count = 0;
-		hsr->announce_timer.expires = jiffies +
-				msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
-		add_timer(&hsr->announce_timer);
+		mod_timer(&hsr->announce_timer,
+			  jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
 	}
 
 	if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
@@ -331,6 +330,7 @@
 {
 	struct hsr_priv *hsr;
 	struct hsr_port *master;
+	unsigned long interval;
 
 	hsr = (struct hsr_priv *) data;
 
@@ -342,18 +342,16 @@
 				hsr->protVersion);
 		hsr->announce_count++;
 
-		hsr->announce_timer.expires = jiffies +
-				msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+		interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
 	} else {
 		send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
 				hsr->protVersion);
 
-		hsr->announce_timer.expires = jiffies +
-				msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+		interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
 	}
 
 	if (is_admin_up(master->dev))
-		add_timer(&hsr->announce_timer);
+		mod_timer(&hsr->announce_timer, jiffies + interval);
 
 	rcu_read_unlock();
 }
@@ -485,7 +483,7 @@
 
 	res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
 	if (res)
-		return res;
+		goto err_add_port;
 
 	res = register_netdevice(hsr_dev);
 	if (res)
@@ -505,6 +503,8 @@
 fail:
 	hsr_for_each_port(hsr, port)
 		hsr_del_port(port);
+err_add_port:
+	hsr_del_node(&hsr->self_node_db);
 
 	return res;
 }
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 284a9b8..6705420 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -124,6 +124,18 @@
 	return 0;
 }
 
+void hsr_del_node(struct list_head *self_node_db)
+{
+	struct hsr_node *node;
+
+	rcu_read_lock();
+	node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+	rcu_read_unlock();
+	if (node) {
+		list_del_rcu(&node->mac_list);
+		kfree(node);
+	}
+}
 
 /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
  * seq_out is used to initialize filtering of outgoing duplicate frames
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 4e04f0e..43958a3 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -16,6 +16,7 @@
 
 struct hsr_node;
 
+void hsr_del_node(struct list_head *self_node_db);
 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 			      u16 seq_out);
 struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index aab1e2d..c01df34 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -25,7 +25,7 @@
 
 #include <net/ieee802154_netdev.h>
 #include <net/6lowpan.h>
-#include <net/ipv6.h>
+#include <net/ipv6_frag.h>
 #include <net/inet_frag.h>
 
 #include "6lowpan_i.h"
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 030d153..17acc89 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -119,6 +119,7 @@
 	struct guehdr *guehdr;
 	void *data;
 	u16 doffset = 0;
+	u8 proto_ctype;
 
 	if (!fou)
 		return 1;
@@ -210,13 +211,14 @@
 	if (unlikely(guehdr->control))
 		return gue_control_message(skb, guehdr);
 
+	proto_ctype = guehdr->proto_ctype;
 	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
 	skb_reset_transport_header(skb);
 
 	if (iptunnel_pull_offloads(skb))
 		goto drop;
 
-	return -guehdr->proto_ctype;
+	return -proto_ctype;
 
 drop:
 	kfree_skb(skb);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 9453180..80a2d99 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -797,7 +797,6 @@
 		tcp_sk(child)->fastopen_rsk = NULL;
 	}
 	inet_csk_destroy_sock(child);
-	reqsk_put(req);
 }
 
 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
@@ -868,6 +867,7 @@
 		sock_hold(child);
 
 		inet_child_forget(sk, req, child);
+		reqsk_put(req);
 		bh_unlock_sock(child);
 		local_bh_enable();
 		sock_put(child);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 0fb49de..2325cd3 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -24,6 +24,62 @@
 #include <net/sock.h>
 #include <net/inet_frag.h>
 #include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
+	union {
+		struct inet_skb_parm	h4;
+		struct inet6_skb_parm	h6;
+	};
+	struct sk_buff		*next_frag;
+	int			frag_run_len;
+};
+
+#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void fragcb_clear(struct sk_buff *skb)
+{
+	RB_CLEAR_NODE(&skb->rbnode);
+	FRAG_CB(skb)->next_frag = NULL;
+	FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void fragrun_append_to_last(struct inet_frag_queue *q,
+				   struct sk_buff *skb)
+{
+	fragcb_clear(skb);
+
+	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+	FRAG_CB(q->fragments_tail)->next_frag = skb;
+	q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+	fragcb_clear(skb);
+
+	if (q->last_run_head)
+		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+			     &q->last_run_head->rbnode.rb_right);
+	else
+		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+	rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+	q->fragments_tail = skb;
+	q->last_run_head = skb;
+}
 
 /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
  * Value : 0xff if frame should be dropped.
@@ -122,6 +178,28 @@
 	kmem_cache_free(f->frags_cachep, q);
 }
 
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+{
+	struct rb_node *p = rb_first(root);
+	unsigned int sum = 0;
+
+	while (p) {
+		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+		p = rb_next(p);
+		rb_erase(&skb->rbnode, root);
+		while (skb) {
+			struct sk_buff *next = FRAG_CB(skb)->next_frag;
+
+			sum += skb->truesize;
+			kfree_skb(skb);
+			skb = next;
+		}
+	}
+	return sum;
+}
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
 void inet_frag_destroy(struct inet_frag_queue *q)
 {
 	struct sk_buff *fp;
@@ -223,3 +301,218 @@
 	return fq;
 }
 EXPORT_SYMBOL(inet_frag_find);
+
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+			   int offset, int end)
+{
+	struct sk_buff *last = q->fragments_tail;
+
+	/* RFC5722, Section 4, amended by Errata ID : 3089
+	 *                          When reassembling an IPv6 datagram, if
+	 *   one or more of its constituent fragments is determined to be an
+	 *   overlapping fragment, the entire datagram (and any constituent
+	 *   fragments) MUST be silently discarded.
+	 *
+	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
+	 * queue/fragments kept for later reassembly).
+	 */
+	if (!last)
+		fragrun_create(q, skb);  /* First fragment. */
+	else if (last->ip_defrag_offset + last->len < end) {
+		/* This is the common case: skb goes to the end. */
+		/* Detect and discard overlaps. */
+		if (offset < last->ip_defrag_offset + last->len)
+			return IPFRAG_OVERLAP;
+		if (offset == last->ip_defrag_offset + last->len)
+			fragrun_append_to_last(q, skb);
+		else
+			fragrun_create(q, skb);
+	} else {
+		/* Binary search. Note that skb can become the first fragment,
+		 * but not the last (covered above).
+		 */
+		struct rb_node **rbn, *parent;
+
+		rbn = &q->rb_fragments.rb_node;
+		do {
+			struct sk_buff *curr;
+			int curr_run_end;
+
+			parent = *rbn;
+			curr = rb_to_skb(parent);
+			curr_run_end = curr->ip_defrag_offset +
+					FRAG_CB(curr)->frag_run_len;
+			if (end <= curr->ip_defrag_offset)
+				rbn = &parent->rb_left;
+			else if (offset >= curr_run_end)
+				rbn = &parent->rb_right;
+			else if (offset >= curr->ip_defrag_offset &&
+				 end <= curr_run_end)
+				return IPFRAG_DUP;
+			else
+				return IPFRAG_OVERLAP;
+		} while (*rbn);
+		/* Here we have parent properly set, and rbn pointing to
+		 * one of its NULL left/right children. Insert skb.
+		 */
+		fragcb_clear(skb);
+		rb_link_node(&skb->rbnode, parent, rbn);
+		rb_insert_color(&skb->rbnode, &q->rb_fragments);
+	}
+
+	skb->ip_defrag_offset = offset;
+
+	return IPFRAG_OK;
+}
+EXPORT_SYMBOL(inet_frag_queue_insert);
+
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+			      struct sk_buff *parent)
+{
+	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
+	struct sk_buff **nextp;
+	int delta;
+
+	if (head != skb) {
+		fp = skb_clone(skb, GFP_ATOMIC);
+		if (!fp)
+			return NULL;
+		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+		if (RB_EMPTY_NODE(&skb->rbnode))
+			FRAG_CB(parent)->next_frag = fp;
+		else
+			rb_replace_node(&skb->rbnode, &fp->rbnode,
+					&q->rb_fragments);
+		if (q->fragments_tail == skb)
+			q->fragments_tail = fp;
+		skb_morph(skb, head);
+		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+		rb_replace_node(&head->rbnode, &skb->rbnode,
+				&q->rb_fragments);
+		consume_skb(head);
+		head = skb;
+	}
+	WARN_ON(head->ip_defrag_offset != 0);
+
+	delta = -head->truesize;
+
+	/* Head of list must not be cloned. */
+	if (skb_unclone(head, GFP_ATOMIC))
+		return NULL;
+
+	delta += head->truesize;
+	if (delta)
+		add_frag_mem_limit(q->net, delta);
+
+	/* If the first fragment is fragmented itself, we split
+	 * it to two chunks: the first with data and paged part
+	 * and the second, holding only fragments.
+	 */
+	if (skb_has_frag_list(head)) {
+		struct sk_buff *clone;
+		int i, plen = 0;
+
+		clone = alloc_skb(0, GFP_ATOMIC);
+		if (!clone)
+			return NULL;
+		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+		skb_frag_list_init(head);
+		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+		clone->data_len = head->data_len - plen;
+		clone->len = clone->data_len;
+		head->truesize += clone->truesize;
+		clone->csum = 0;
+		clone->ip_summed = head->ip_summed;
+		add_frag_mem_limit(q->net, clone->truesize);
+		skb_shinfo(head)->frag_list = clone;
+		nextp = &clone->next;
+	} else {
+		nextp = &skb_shinfo(head)->frag_list;
+	}
+
+	return nextp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_prepare);
+
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+			    void *reasm_data)
+{
+	struct sk_buff **nextp = (struct sk_buff **)reasm_data;
+	struct rb_node *rbn;
+	struct sk_buff *fp;
+
+	skb_push(head, head->data - skb_network_header(head));
+
+	/* Traverse the tree in order, to build frag_list. */
+	fp = FRAG_CB(head)->next_frag;
+	rbn = rb_next(&head->rbnode);
+	rb_erase(&head->rbnode, &q->rb_fragments);
+	while (rbn || fp) {
+		/* fp points to the next sk_buff in the current run;
+		 * rbn points to the next run.
+		 */
+		/* Go through the current run. */
+		while (fp) {
+			*nextp = fp;
+			nextp = &fp->next;
+			fp->prev = NULL;
+			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+			fp->sk = NULL;
+			head->data_len += fp->len;
+			head->len += fp->len;
+			if (head->ip_summed != fp->ip_summed)
+				head->ip_summed = CHECKSUM_NONE;
+			else if (head->ip_summed == CHECKSUM_COMPLETE)
+				head->csum = csum_add(head->csum, fp->csum);
+			head->truesize += fp->truesize;
+			fp = FRAG_CB(fp)->next_frag;
+		}
+		/* Move to the next run. */
+		if (rbn) {
+			struct rb_node *rbnext = rb_next(rbn);
+
+			fp = rb_to_skb(rbn);
+			rb_erase(rbn, &q->rb_fragments);
+			rbn = rbnext;
+		}
+	}
+	sub_frag_mem_limit(q->net, head->truesize);
+
+	*nextp = NULL;
+	head->next = NULL;
+	head->prev = NULL;
+	head->tstamp = q->stamp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_finish);
+
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
+{
+	struct sk_buff *head;
+
+	if (q->fragments) {
+		head = q->fragments;
+		q->fragments = head->next;
+	} else {
+		struct sk_buff *skb;
+
+		head = skb_rb_first(&q->rb_fragments);
+		if (!head)
+			return NULL;
+		skb = FRAG_CB(head)->next_frag;
+		if (skb)
+			rb_replace_node(&head->rbnode, &skb->rbnode,
+					&q->rb_fragments);
+		else
+			rb_erase(&head->rbnode, &q->rb_fragments);
+		memset(&head->rbnode, 0, sizeof(head->rbnode));
+		barrier();
+	}
+	if (head == q->fragments_tail)
+		q->fragments_tail = NULL;
+
+	sub_frag_mem_limit(q->net, head->truesize);
+
+	return head;
+}
+EXPORT_SYMBOL(inet_frag_pull_head);
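The rb-tree walk in inet_frag_queue_insert() reduces to a four-way interval comparison against each "run". A toy version of just that decision, with illustrative names:

enum frag_verdict { GO_LEFT, GO_RIGHT, DUP, OVERLAP };

/* Compare a candidate fragment [offset, end) with one run
 * [run_start, run_end): descend left or right while disjoint, drop a
 * fully contained duplicate, and treat any partial overlap as grounds
 * to discard the whole datagram per RFC 5722.
 */
static enum frag_verdict classify(int offset, int end,
				  int run_start, int run_end)
{
	if (end <= run_start)
		return GO_LEFT;
	if (offset >= run_end)
		return GO_RIGHT;
	if (offset >= run_start && end <= run_end)
		return DUP;
	return OVERLAP;
}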
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index c7334d1..6e9ba9d 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -56,57 +56,6 @@
  */
 static const char ip_frag_cache_name[] = "ip4-frags";
 
-/* Use skb->cb to track consecutive/adjacent fragments coming at
- * the end of the queue. Nodes in the rb-tree queue will
- * contain "runs" of one or more adjacent fragments.
- *
- * Invariants:
- * - next_frag is NULL at the tail of a "run";
- * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
- */
-struct ipfrag_skb_cb {
-	struct inet_skb_parm	h;
-	struct sk_buff		*next_frag;
-	int			frag_run_len;
-};
-
-#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
-
-static void ip4_frag_init_run(struct sk_buff *skb)
-{
-	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
-
-	FRAG_CB(skb)->next_frag = NULL;
-	FRAG_CB(skb)->frag_run_len = skb->len;
-}
-
-/* Append skb to the last "run". */
-static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
-					struct sk_buff *skb)
-{
-	RB_CLEAR_NODE(&skb->rbnode);
-	FRAG_CB(skb)->next_frag = NULL;
-
-	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
-	FRAG_CB(q->fragments_tail)->next_frag = skb;
-	q->fragments_tail = skb;
-}
-
-/* Create a new "run" with the skb. */
-static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
-{
-	if (q->last_run_head)
-		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
-			     &q->last_run_head->rbnode.rb_right);
-	else
-		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
-	rb_insert_color(&skb->rbnode, &q->rb_fragments);
-
-	ip4_frag_init_run(skb);
-	q->fragments_tail = skb;
-	q->last_run_head = skb;
-}
-
 /* Describe an entry in the "incomplete datagrams" queue. */
 struct ipq {
 	struct inet_frag_queue q;
@@ -210,27 +159,9 @@
 	 * pull the head out of the tree in order to be able to
 	 * deal with head->dev.
 	 */
-	if (qp->q.fragments) {
-		head = qp->q.fragments;
-		qp->q.fragments = head->next;
-	} else {
-		head = skb_rb_first(&qp->q.rb_fragments);
-		if (!head)
-			goto out;
-		if (FRAG_CB(head)->next_frag)
-			rb_replace_node(&head->rbnode,
-					&FRAG_CB(head)->next_frag->rbnode,
-					&qp->q.rb_fragments);
-		else
-			rb_erase(&head->rbnode, &qp->q.rb_fragments);
-		memset(&head->rbnode, 0, sizeof(head->rbnode));
-		barrier();
-	}
-	if (head == qp->q.fragments_tail)
-		qp->q.fragments_tail = NULL;
-
-	sub_frag_mem_limit(qp->q.net, head->truesize);
-
+	head = inet_frag_pull_head(&qp->q);
+	if (!head)
+		goto out;
 	head->dev = dev_get_by_index_rcu(net, qp->iif);
 	if (!head->dev)
 		goto out;
@@ -343,12 +274,10 @@
 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 {
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
-	struct rb_node **rbn, *parent;
-	struct sk_buff *skb1, *prev_tail;
-	int ihl, end, skb1_run_end;
+	int ihl, end, flags, offset;
+	struct sk_buff *prev_tail;
 	struct net_device *dev;
 	unsigned int fragsize;
-	int flags, offset;
 	int err = -ENOENT;
 	u8 ecn;
 
@@ -380,7 +309,7 @@
 		 */
 		if (end < qp->q.len ||
 		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
-			goto err;
+			goto discard_qp;
 		qp->q.flags |= INET_FRAG_LAST_IN;
 		qp->q.len = end;
 	} else {
@@ -392,82 +321,33 @@
 		if (end > qp->q.len) {
 			/* Some bits beyond end -> corruption. */
 			if (qp->q.flags & INET_FRAG_LAST_IN)
-				goto err;
+				goto discard_qp;
 			qp->q.len = end;
 		}
 	}
 	if (end == offset)
-		goto err;
+		goto discard_qp;
 
 	err = -ENOMEM;
 	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
-		goto err;
+		goto discard_qp;
 
 	err = pskb_trim_rcsum(skb, end - offset);
 	if (err)
-		goto err;
+		goto discard_qp;
 
 	/* Note : skb->rbnode and skb->dev share the same location. */
 	dev = skb->dev;
 	/* Makes sure compiler wont do silly aliasing games */
 	barrier();
 
-	/* RFC5722, Section 4, amended by Errata ID : 3089
-	 *                          When reassembling an IPv6 datagram, if
-	 *   one or more its constituent fragments is determined to be an
-	 *   overlapping fragment, the entire datagram (and any constituent
-	 *   fragments) MUST be silently discarded.
-	 *
-	 * We do the same here for IPv4 (and increment an snmp counter) but
-	 * we do not want to drop the whole queue in response to a duplicate
-	 * fragment.
-	 */
-
-	err = -EINVAL;
-	/* Find out where to put this fragment.  */
 	prev_tail = qp->q.fragments_tail;
-	if (!prev_tail)
-		ip4_frag_create_run(&qp->q, skb);  /* First fragment. */
-	else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
-		/* This is the common case: skb goes to the end. */
-		/* Detect and discard overlaps. */
-		if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
-			goto discard_qp;
-		if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
-			ip4_frag_append_to_last_run(&qp->q, skb);
-		else
-			ip4_frag_create_run(&qp->q, skb);
-	} else {
-		/* Binary search. Note that skb can become the first fragment,
-		 * but not the last (covered above).
-		 */
-		rbn = &qp->q.rb_fragments.rb_node;
-		do {
-			parent = *rbn;
-			skb1 = rb_to_skb(parent);
-			skb1_run_end = skb1->ip_defrag_offset +
-				       FRAG_CB(skb1)->frag_run_len;
-			if (end <= skb1->ip_defrag_offset)
-				rbn = &parent->rb_left;
-			else if (offset >= skb1_run_end)
-				rbn = &parent->rb_right;
-			else if (offset >= skb1->ip_defrag_offset &&
-				 end <= skb1_run_end)
-				goto err; /* No new data, potential duplicate */
-			else
-				goto discard_qp; /* Found an overlap */
-		} while (*rbn);
-		/* Here we have parent properly set, and rbn pointing to
-		 * one of its NULL left/right children. Insert skb.
-		 */
-		ip4_frag_init_run(skb);
-		rb_link_node(&skb->rbnode, parent, rbn);
-		rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
-	}
+	err = inet_frag_queue_insert(&qp->q, skb, offset, end);
+	if (err)
+		goto insert_error;
 
 	if (dev)
 		qp->iif = dev->ifindex;
-	skb->ip_defrag_offset = offset;
 
 	qp->q.stamp = skb->tstamp;
 	qp->q.meat += skb->len;
@@ -492,15 +372,24 @@
 		skb->_skb_refdst = 0UL;
 		err = ip_frag_reasm(qp, skb, prev_tail, dev);
 		skb->_skb_refdst = orefdst;
+		if (err)
+			inet_frag_kill(&qp->q);
 		return err;
 	}
 
 	skb_dst_drop(skb);
 	return -EINPROGRESS;
 
+insert_error:
+	if (err == IPFRAG_DUP) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+	err = -EINVAL;
+	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
 discard_qp:
 	inet_frag_kill(&qp->q);
-	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 err:
 	kfree_skb(skb);
 	return err;
@@ -512,12 +401,8 @@
 {
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
 	struct iphdr *iph;
-	struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
-	struct sk_buff **nextp; /* To build frag_list. */
-	struct rb_node *rbn;
-	int len;
-	int ihlen;
-	int err;
+	void *reasm_data;
+	int len, err;
 	u8 ecn;
 
 	ipq_kill(qp);
@@ -527,111 +412,23 @@
 		err = -EINVAL;
 		goto out_fail;
 	}
+
 	/* Make the one we just received the head. */
-	if (head != skb) {
-		fp = skb_clone(skb, GFP_ATOMIC);
-		if (!fp)
-			goto out_nomem;
-		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
-		if (RB_EMPTY_NODE(&skb->rbnode))
-			FRAG_CB(prev_tail)->next_frag = fp;
-		else
-			rb_replace_node(&skb->rbnode, &fp->rbnode,
-					&qp->q.rb_fragments);
-		if (qp->q.fragments_tail == skb)
-			qp->q.fragments_tail = fp;
-		skb_morph(skb, head);
-		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
-		rb_replace_node(&head->rbnode, &skb->rbnode,
-				&qp->q.rb_fragments);
-		consume_skb(head);
-		head = skb;
-	}
+	reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
+	if (!reasm_data)
+		goto out_nomem;
 
-	WARN_ON(head->ip_defrag_offset != 0);
-
-	/* Allocate a new buffer for the datagram. */
-	ihlen = ip_hdrlen(head);
-	len = ihlen + qp->q.len;
-
+	len = ip_hdrlen(skb) + qp->q.len;
 	err = -E2BIG;
 	if (len > 65535)
 		goto out_oversize;
 
-	/* Head of list must not be cloned. */
-	if (skb_unclone(head, GFP_ATOMIC))
-		goto out_nomem;
+	inet_frag_reasm_finish(&qp->q, skb, reasm_data);
 
-	/* If the first fragment is fragmented itself, we split
-	 * it to two chunks: the first with data and paged part
-	 * and the second, holding only fragments. */
-	if (skb_has_frag_list(head)) {
-		struct sk_buff *clone;
-		int i, plen = 0;
+	skb->dev = dev;
+	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
 
-		clone = alloc_skb(0, GFP_ATOMIC);
-		if (!clone)
-			goto out_nomem;
-		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-		skb_frag_list_init(head);
-		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-		clone->len = clone->data_len = head->data_len - plen;
-		head->truesize += clone->truesize;
-		clone->csum = 0;
-		clone->ip_summed = head->ip_summed;
-		add_frag_mem_limit(qp->q.net, clone->truesize);
-		skb_shinfo(head)->frag_list = clone;
-		nextp = &clone->next;
-	} else {
-		nextp = &skb_shinfo(head)->frag_list;
-	}
-
-	skb_push(head, head->data - skb_network_header(head));
-
-	/* Traverse the tree in order, to build frag_list. */
-	fp = FRAG_CB(head)->next_frag;
-	rbn = rb_next(&head->rbnode);
-	rb_erase(&head->rbnode, &qp->q.rb_fragments);
-	while (rbn || fp) {
-		/* fp points to the next sk_buff in the current run;
-		 * rbn points to the next run.
-		 */
-		/* Go through the current run. */
-		while (fp) {
-			*nextp = fp;
-			nextp = &fp->next;
-			fp->prev = NULL;
-			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
-			fp->sk = NULL;
-			head->data_len += fp->len;
-			head->len += fp->len;
-			if (head->ip_summed != fp->ip_summed)
-				head->ip_summed = CHECKSUM_NONE;
-			else if (head->ip_summed == CHECKSUM_COMPLETE)
-				head->csum = csum_add(head->csum, fp->csum);
-			head->truesize += fp->truesize;
-			fp = FRAG_CB(fp)->next_frag;
-		}
-		/* Move to the next run. */
-		if (rbn) {
-			struct rb_node *rbnext = rb_next(rbn);
-
-			fp = rb_to_skb(rbn);
-			rb_erase(rbn, &qp->q.rb_fragments);
-			rbn = rbnext;
-		}
-	}
-	sub_frag_mem_limit(qp->q.net, head->truesize);
-
-	*nextp = NULL;
-	head->next = NULL;
-	head->prev = NULL;
-	head->dev = dev;
-	head->tstamp = qp->q.stamp;
-	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
-
-	iph = ip_hdr(head);
+	iph = ip_hdr(skb);
 	iph->tot_len = htons(len);
 	iph->tos |= ecn;
 
@@ -644,7 +441,7 @@
 	 * from one very small df-fragment and one large non-df frag.
 	 */
 	if (qp->max_df_size == qp->q.max_size) {
-		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+		IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
 		iph->frag_off = htons(IP_DF);
 	} else {
 		iph->frag_off = 0;
@@ -742,28 +539,6 @@
 }
 EXPORT_SYMBOL(ip_check_defrag);
 
-unsigned int inet_frag_rbtree_purge(struct rb_root *root)
-{
-	struct rb_node *p = rb_first(root);
-	unsigned int sum = 0;
-
-	while (p) {
-		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
-
-		p = rb_next(p);
-		rb_erase(&skb->rbnode, root);
-		while (skb) {
-			struct sk_buff *next = FRAG_CB(skb)->next_frag;
-
-			sum += skb->truesize;
-			kfree_skb(skb);
-			skb = next;
-		}
-	}
-	return sum;
-}
-EXPORT_SYMBOL(inet_frag_rbtree_purge);
-
 #ifdef CONFIG_SYSCTL
 static int dist_min;
 
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index bcadca2..ce0ce14 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -259,11 +259,10 @@
 		       ip_local_deliver_finish);
 }
 
-static inline bool ip_rcv_options(struct sk_buff *skb)
+static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_options *opt;
 	const struct iphdr *iph;
-	struct net_device *dev = skb->dev;
 
 	/* It looks as overkill, because not all
 	   IP options require packet mangling.
@@ -299,7 +298,7 @@
 			}
 		}
 
-		if (ip_options_rcv_srr(skb))
+		if (ip_options_rcv_srr(skb, dev))
 			goto drop;
 	}
 
@@ -361,7 +360,7 @@
 	}
 #endif
 
-	if (iph->ihl > 5 && ip_rcv_options(skb))
+	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
 		goto drop;
 
 	rt = skb_rtable(skb);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 4cd3b5a..570cdb5 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -614,7 +614,7 @@
 	}
 }
 
-int ip_options_rcv_srr(struct sk_buff *skb)
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_options *opt = &(IPCB(skb)->opt);
 	int srrspace, srrptr;
@@ -649,7 +649,7 @@
 
 		orefdst = skb->_skb_refdst;
 		skb_dst_set(skb, NULL);
-		err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
+		err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
 		rt2 = skb_rtable(skb);
 		if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
 			skb_dst_drop(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 5fcafc8..4d3ef37 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -514,6 +514,7 @@
 	to->pkt_type = from->pkt_type;
 	to->priority = from->priority;
 	to->protocol = from->protocol;
+	to->skb_iif = from->skb_iif;
 	skb_dst_drop(to);
 	skb_dst_copy(to, from);
 	to->dev = from->dev;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 270e79f..4e39c93 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -678,9 +678,9 @@
 	return err;
 
 rtnl_link_failed:
-	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
-xfrm_tunnel_failed:
 	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+xfrm_tunnel_failed:
+	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
 xfrm_proto_comp_failed:
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 xfrm_proto_ah_failed:
@@ -696,6 +696,7 @@
 static void __exit vti_fini(void)
 {
 	rtnl_link_unregister(&vti_link_ops);
+	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
 	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a860df2..482cf7c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -169,6 +169,7 @@
  */
 static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
 {
+	int dif = inet_iif(skb);
 	struct sock *sk;
 	struct hlist_head *head;
 	int delivered = 0;
@@ -181,8 +182,7 @@
 
 	net = dev_net(skb->dev);
 	sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
-			     iph->saddr, iph->daddr,
-			     skb->dev->ifindex);
+			     iph->saddr, iph->daddr, dif);
 
 	while (sk) {
 		delivered = 1;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 94419b8..7cad5aa 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1174,11 +1174,39 @@
 	return dst;
 }
 
+static void ipv4_send_dest_unreach(struct sk_buff *skb)
+{
+	struct ip_options opt;
+	int res;
+
+	/* Recompile ip options since IPCB may not be valid anymore.
+	 * Also check we have a reasonable ipv4 header.
+	 */
+	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
+	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
+		return;
+
+	memset(&opt, 0, sizeof(opt));
+	if (ip_hdr(skb)->ihl > 5) {
+		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
+			return;
+		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+		rcu_read_lock();
+		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+		rcu_read_unlock();
+
+		if (res)
+			return;
+	}
+	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+}
+
 static void ipv4_link_failure(struct sk_buff *skb)
 {
 	struct rtable *rt;
 
-	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+	ipv4_send_dest_unreach(skb);
 
 	rt = skb_rtable(skb);
 	if (rt)
@@ -1619,6 +1647,10 @@
 		if (fnhe->fnhe_daddr == daddr) {
 			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
 				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+			/* set fnhe_daddr to 0 to ensure it won't bind with
+			 * new dsts in rt_bind_exception().
+			 */
+			fnhe->fnhe_daddr = 0;
 			fnhe_flush_routes(fnhe);
 			kfree_rcu(fnhe, rcu);
 			break;
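
The ipv4_send_dest_unreach() hunk above refuses to recompile IP options unless the buffer still holds a plausible IPv4 header, including the full options area implied by ihl. A hedged standalone sketch of that sanity check, with hypothetical names and outside the skb machinery:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: mirror the version/ihl/length checks the
     * patch performs with pskb_network_may_pull().
     */
    static bool ipv4_header_sane(const uint8_t *pkt, size_t len)
    {
        if (len < 20)                    /* fixed 20-byte base header */
            return false;
        uint8_t version = pkt[0] >> 4;
        uint8_t ihl = pkt[0] & 0x0f;     /* header length in 32-bit words */
        if (version != 4 || ihl < 5)     /* 5 words == 20 bytes minimum */
            return false;
        return len >= (size_t)ihl * 4;   /* options must be fully present */
    }
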
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 4487c71..b59d9cd 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -225,7 +225,12 @@
 	if (child) {
 		atomic_set(&req->rsk_refcnt, 1);
 		sock_rps_save_rxhash(child, skb);
-		inet_csk_reqsk_queue_add(sk, req, child);
+		if (!inet_csk_reqsk_queue_add(sk, req, child)) {
+			bh_unlock_sock(child);
+			sock_put(child);
+			child = NULL;
+			reqsk_put(req);
+		}
 	} else {
 		reqsk_free(req);
 	}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index adc9ccc..76da06e 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -45,6 +45,7 @@
 static int tcp_delack_seg_max = 60;
 static int tcp_use_userconfig_min;
 static int tcp_use_userconfig_max = 1;
+static int one_day_secs = 24 * 3600;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -479,7 +480,9 @@
 		.data		= &sysctl_tcp_min_rtt_wlen,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one_day_secs
 	},
 	{
 		.procname	= "tcp_low_latency",
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index a08cedf..910ef01 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -66,11 +66,6 @@
 module_param(dctcp_alpha_on_init, uint, 0644);
 MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
 
-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
-		 "parameter for clamping alpha on loss");
-
 static struct tcp_congestion_ops dctcp_reno;
 
 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
@@ -211,21 +206,23 @@
 	}
 }
 
+static void dctcp_react_to_loss(struct sock *sk)
+{
+	struct dctcp *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	ca->loss_cwnd = tp->snd_cwnd;
+	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+}
+
 static void dctcp_state(struct sock *sk, u8 new_state)
 {
-	if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
-		struct dctcp *ca = inet_csk_ca(sk);
-
-		/* If this extension is enabled, we clamp dctcp_alpha to
-		 * max on packet loss; the motivation is that dctcp_alpha
-		 * is an indicator to the extend of congestion and packet
-		 * loss is an indicator of extreme congestion; setting
-		 * this in practice turned out to be beneficial, and
-		 * effectively assumes total congestion which reduces the
-		 * window by half.
-		 */
-		ca->dctcp_alpha = DCTCP_MAX_ALPHA;
-	}
+	if (new_state == TCP_CA_Recovery &&
+	    new_state != inet_csk(sk)->icsk_ca_state)
+		dctcp_react_to_loss(sk);
+	/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+	 * one loss-adjustment per RTT.
+	 */
 }
 
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
@@ -237,6 +234,9 @@
 	case CA_EVENT_ECN_NO_CE:
 		dctcp_ce_state_1_to_0(sk);
 		break;
+	case CA_EVENT_LOSS:
+		dctcp_react_to_loss(sk);
+		break;
 	default:
 		/* Don't care for the rest. */
 		break;
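
The new dctcp_react_to_loss() records loss_cwnd and sets ssthresh to max(cwnd/2, 2), applied at most once per loss episode (on entry to Recovery, or on RTO via CA_EVENT_LOSS). A minimal worked sketch of the arithmetic, with hypothetical values:

    #include <stdio.h>

    /* max(cwnd >> 1, 2), as in dctcp_react_to_loss() above */
    static unsigned int loss_ssthresh(unsigned int snd_cwnd)
    {
        unsigned int half = snd_cwnd >> 1;
        return half > 2 ? half : 2;
    }

    int main(void)
    {
        printf("%u\n", loss_ssthresh(10)); /* 5 */
        printf("%u\n", loss_ssthresh(3));  /* 2: floor keeps the flow alive */
        return 0;
    }
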
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5f6dc5f..6c42ffc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -390,11 +390,12 @@
 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	int room;
+
+	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 
 	/* Check #1 */
-	if (tp->rcv_ssthresh < tp->window_clamp &&
-	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !tcp_under_memory_pressure(sk)) {
+	if (room > 0 && !tcp_under_memory_pressure(sk)) {
 		int incr;
 
 		/* Check #2. Increase window, if skb with such overhead
@@ -407,8 +408,7 @@
 
 		if (incr) {
 			incr = max_t(int, incr, 2 * skb->len);
-			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
-					       tp->window_clamp);
+			tp->rcv_ssthresh += min(room, incr);
 			inet_csk(sk)->icsk_ack.quick |= 1;
 		}
 	}
@@ -6376,13 +6376,7 @@
 			goto drop;
 	}
 
-
-	/* Accept backlog is full. If we have already queued enough
-	 * of warm entries in syn queue, drop request. It is better than
-	 * clogging syn queue with openreqs with exponentially increasing
-	 * timeout.
-	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+	if (sk_acceptq_is_full(sk)) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
 	}
@@ -6481,7 +6475,13 @@
 		af_ops->send_synack(fastopen_sk, dst, &fl, req,
 				    &foc, TCP_SYNACK_FASTOPEN);
 		/* Add the child socket directly into the accept queue */
-		inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
+		if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
+			reqsk_fastopen_remove(fastopen_sk, req, false);
+			bh_unlock_sock(fastopen_sk);
+			sock_put(fastopen_sk);
+			reqsk_put(req);
+			goto drop;
+		}
 		sk->sk_data_ready(sk);
 		bh_unlock_sock(fastopen_sk);
 		sock_put(fastopen_sk);
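
The tcp_grow_window() hunk replaces two separate bound checks with a single headroom value: room = min(window_clamp, tcp_space()) - rcv_ssthresh, and then grows rcv_ssthresh by at most that much. A hedged standalone sketch of the clamped growth (names are hypothetical):

    static int grow_rcv_ssthresh(int rcv_ssthresh, int window_clamp,
                                 int space, int incr)
    {
        int limit = window_clamp < space ? window_clamp : space;
        int room  = limit - rcv_ssthresh;      /* remaining headroom */

        if (room <= 0 || incr <= 0)
            return rcv_ssthresh;               /* already at the ceiling */
        return rcv_ssthresh + (incr < room ? incr : room);
    }
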
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index e62d76c9..16f03d8 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -112,7 +112,8 @@
 _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
+	int ihl = iph->ihl;
+	u8 *xprth = skb_network_header(skb) + ihl * 4;
 	struct flowi4 *fl4 = &fl->u.ip4;
 	int oif = 0;
 
@@ -123,6 +124,11 @@
 	fl4->flowi4_mark = skb->mark;
 	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
 
+	fl4->flowi4_proto = iph->protocol;
+	fl4->daddr = reverse ? iph->saddr : iph->daddr;
+	fl4->saddr = reverse ? iph->daddr : iph->saddr;
+	fl4->flowi4_tos = iph->tos;
+
 	if (!ip_is_fragment(iph)) {
 		switch (iph->protocol) {
 		case IPPROTO_UDP:
@@ -134,7 +140,7 @@
 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
 				__be16 *ports;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				ports = (__be16 *)xprth;
 
 				fl4->fl4_sport = ports[!!reverse];
@@ -147,7 +153,7 @@
 			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
 				u8 *icmp;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				icmp = xprth;
 
 				fl4->fl4_icmp_type = icmp[0];
@@ -160,7 +166,7 @@
 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
 				__be32 *ehdr;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				ehdr = (__be32 *)xprth;
 
 				fl4->fl4_ipsec_spi = ehdr[0];
@@ -172,7 +178,7 @@
 			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
 				__be32 *ah_hdr;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				ah_hdr = (__be32 *)xprth;
 
 				fl4->fl4_ipsec_spi = ah_hdr[1];
@@ -184,7 +190,7 @@
 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
 				__be16 *ipcomp_hdr;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				ipcomp_hdr = (__be16 *)xprth;
 
 				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
@@ -197,7 +203,7 @@
 				__be16 *greflags;
 				__be32 *gre_hdr;
 
-				xprth = skb_network_header(skb) + iph->ihl * 4;
+				xprth = skb_network_header(skb) + ihl * 4;
 				greflags = (__be16 *)xprth;
 				gre_hdr = (__be32 *)xprth;
 
@@ -214,10 +220,6 @@
 			break;
 		}
 	}
-	fl4->flowi4_proto = iph->protocol;
-	fl4->daddr = reverse ? iph->saddr : iph->daddr;
-	fl4->saddr = reverse ? iph->daddr : iph->saddr;
-	fl4->flowi4_tos = iph->tos;
 }
 
 static inline int xfrm4_garbage_collect(struct dst_ops *ops)
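
Each pskb_may_pull() in _decode_session4() may reallocate the skb head, so the hunk caches ihl once, recomputes xprth from skb_network_header() after every pull, and moves the address/tos/proto copies ahead of any pull, instead of re-reading through the possibly stale iph pointer. The same hazard in a hedged userspace sketch (names hypothetical):

    #include <stdlib.h>
    #include <string.h>

    struct buf { unsigned char *head; size_t len; };

    /* may move buf->head, as pskb_may_pull() may move skb->head */
    static int ensure_len(struct buf *b, size_t want)
    {
        if (b->len >= want)
            return 1;
        unsigned char *n = realloc(b->head, want);
        if (!n)
            return 0;
        memset(n + b->len, 0, want - b->len);
        b->head = n;
        b->len = want;
        return 1;
    }

    static void parse(struct buf *b)    /* assumes b->len >= 1 */
    {
        int ihl = b->head[0] & 0x0f;    /* cache the value, not a pointer */

        if (!ensure_len(b, (size_t)ihl * 4 + 4))
            return;
        /* recompute from b->head; any pointer taken earlier may dangle */
        unsigned char *xprth = b->head + ihl * 4;
        (void)xprth;
    }
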
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index b82e439..8c2f9ae 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -94,15 +94,21 @@
 	return fl;
 }
 
+static void fl_free_rcu(struct rcu_head *head)
+{
+	struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
+
+	if (fl->share == IPV6_FL_S_PROCESS)
+		put_pid(fl->owner.pid);
+	kfree(fl->opt);
+	kfree(fl);
+}
+
 
 static void fl_free(struct ip6_flowlabel *fl)
 {
-	if (fl) {
-		if (fl->share == IPV6_FL_S_PROCESS)
-			put_pid(fl->owner.pid);
-		kfree(fl->opt);
-		kfree_rcu(fl, rcu);
-	}
+	if (fl)
+		call_rcu(&fl->rcu, fl_free_rcu);
 }
 
 static void fl_release(struct ip6_flowlabel *fl)
@@ -634,9 +640,9 @@
 				if (fl1->share == IPV6_FL_S_EXCL ||
 				    fl1->share != fl->share ||
 				    ((fl1->share == IPV6_FL_S_PROCESS) &&
-				     (fl1->owner.pid == fl->owner.pid)) ||
+				     (fl1->owner.pid != fl->owner.pid)) ||
 				    ((fl1->share == IPV6_FL_S_USER) &&
-				     uid_eq(fl1->owner.uid, fl->owner.uid)))
+				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
 					goto release;
 
 				err = -ENOMEM;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 510a83f..8092480 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -601,7 +601,7 @@
 				inet6_sk(skb->sk) : NULL;
 	struct ipv6hdr *tmp_hdr;
 	struct frag_hdr *fh;
-	unsigned int mtu, hlen, left, len;
+	unsigned int mtu, hlen, left, len, nexthdr_offset;
 	int hroom, troom;
 	__be32 frag_id;
 	int ptr, offset = 0, err = 0;
@@ -612,6 +612,7 @@
 		goto fail;
 	hlen = err;
 	nexthdr = *prevhdr;
+	nexthdr_offset = prevhdr - skb_network_header(skb);
 
 	mtu = ip6_skb_dst_mtu(skb);
 
@@ -646,6 +647,7 @@
 	    (err = skb_checksum_help(skb)))
 		goto fail;
 
+	prevhdr = skb_network_header(skb) + nexthdr_offset;
 	hroom = LL_RESERVED_SPACE(rt->dst.dev);
 	if (skb_has_frag_list(skb)) {
 		int first_len = skb_pagelen(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8855e89..bbc9748 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -634,7 +634,7 @@
 					   IPPROTO_IPIP,
 					   RT_TOS(eiph->tos), 0);
 		if (IS_ERR(rt) ||
-		    rt->dst.dev->type != ARPHRD_TUNNEL) {
+		    rt->dst.dev->type != ARPHRD_TUNNEL6) {
 			if (!IS_ERR(rt))
 				ip_rt_put(rt);
 			goto out;
@@ -644,7 +644,7 @@
 		ip_rt_put(rt);
 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
 				   skb2->dev) ||
-		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
+		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
 			goto out;
 	}
 
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e461853..1e1fa99 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -33,9 +33,8 @@
 
 #include <net/sock.h>
 #include <net/snmp.h>
-#include <net/inet_frag.h>
+#include <net/ipv6_frag.h>
 
-#include <net/ipv6.h>
 #include <net/protocol.h>
 #include <net/transp_v6.h>
 #include <net/rawv6.h>
@@ -52,14 +51,6 @@
 
 static const char nf_frags_cache_name[] = "nf-frags";
 
-struct nf_ct_frag6_skb_cb
-{
-	struct inet6_skb_parm	h;
-	int			offset;
-};
-
-#define NFCT_FRAG6_CB(skb)	((struct nf_ct_frag6_skb_cb *)((skb)->cb))
-
 static struct inet_frags nf_frags;
 
 #ifdef CONFIG_SYSCTL
@@ -145,6 +136,9 @@
 }
 #endif
 
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			     struct sk_buff *prev_tail, struct net_device *dev);
+
 static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 {
 	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
@@ -158,7 +152,7 @@
 	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
 	net = container_of(fq->q.net, struct net, nf_frag.frags);
 
-	ip6_expire_frag_queue(net, fq);
+	ip6frag_expire_frag_queue(net, fq);
 }
 
 /* Creation primitives. */
@@ -185,9 +179,10 @@
 static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 			     const struct frag_hdr *fhdr, int nhoff)
 {
-	struct sk_buff *prev, *next;
 	unsigned int payload_len;
-	int offset, end;
+	struct net_device *dev;
+	struct sk_buff *prev;
+	int offset, end, err;
 	u8 ecn;
 
 	if (fq->q.flags & INET_FRAG_COMPLETE) {
@@ -262,55 +257,19 @@
 		goto err;
 	}
 
-	/* Find out which fragments are in front and at the back of us
-	 * in the chain of fragments so far.  We must know where to put
-	 * this fragment, right?
-	 */
+	/* Note : skb->rbnode and skb->dev share the same location. */
+	dev = skb->dev;
+	/* Makes sure compiler won't do silly aliasing games */
+	barrier();
+
 	prev = fq->q.fragments_tail;
-	if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) {
-		next = NULL;
-		goto found;
-	}
-	prev = NULL;
-	for (next = fq->q.fragments; next != NULL; next = next->next) {
-		if (NFCT_FRAG6_CB(next)->offset >= offset)
-			break;	/* bingo! */
-		prev = next;
-	}
+	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+	if (err)
+		goto insert_error;
 
-found:
-	/* RFC5722, Section 4:
-	 *                                  When reassembling an IPv6 datagram, if
-	 *   one or more its constituent fragments is determined to be an
-	 *   overlapping fragment, the entire datagram (and any constituent
-	 *   fragments, including those not yet received) MUST be silently
-	 *   discarded.
-	 */
+	if (dev)
+		fq->iif = dev->ifindex;
 
-	/* Check for overlap with preceding fragment. */
-	if (prev &&
-	    (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
-		goto discard_fq;
-
-	/* Look for overlap with succeeding segment. */
-	if (next && NFCT_FRAG6_CB(next)->offset < end)
-		goto discard_fq;
-
-	NFCT_FRAG6_CB(skb)->offset = offset;
-
-	/* Insert this fragment in the chain of fragments. */
-	skb->next = next;
-	if (!next)
-		fq->q.fragments_tail = skb;
-	if (prev)
-		prev->next = skb;
-	else
-		fq->q.fragments = skb;
-
-	if (skb->dev) {
-		fq->iif = skb->dev->ifindex;
-		skb->dev = NULL;
-	}
 	fq->q.stamp = skb->tstamp;
 	fq->q.meat += skb->len;
 	fq->ecn |= ecn;
@@ -326,11 +285,25 @@
 		fq->q.flags |= INET_FRAG_FIRST_IN;
 	}
 
-	return 0;
+	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+	    fq->q.meat == fq->q.len) {
+		unsigned long orefdst = skb->_skb_refdst;
 
-discard_fq:
+		skb->_skb_refdst = 0UL;
+		err = nf_ct_frag6_reasm(fq, skb, prev, dev);
+		skb->_skb_refdst = orefdst;
+		return err;
+	}
+
+	skb_dst_drop(skb);
+	return -EINPROGRESS;
+
+insert_error:
+	if (err == IPFRAG_DUP)
+		goto err;
 	inet_frag_kill(&fq->q);
 err:
+	skb_dst_drop(skb);
 	return -EINVAL;
 }
 
@@ -340,141 +313,67 @@
  *	It is called with locked fq, and caller must check that
  *	queue is eligible for reassembly i.e. it is not COMPLETE,
  *	the last and the first frames arrived and all the bits are here.
- *
- *	returns true if *prev skb has been transformed into the reassembled
- *	skb, false otherwise.
  */
-static bool
-nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_device *dev)
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			     struct sk_buff *prev_tail, struct net_device *dev)
 {
-	struct sk_buff *fp, *head = fq->q.fragments;
-	int    payload_len;
+	void *reasm_data;
+	int payload_len;
 	u8 ecn;
 
 	inet_frag_kill(&fq->q);
 
-	WARN_ON(head == NULL);
-	WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
-
 	ecn = ip_frag_ecn_table[fq->ecn];
 	if (unlikely(ecn == 0xff))
-		return false;
+		goto err;
 
-	/* Unfragmented part is taken from the first segment. */
-	payload_len = ((head->data - skb_network_header(head)) -
+	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+	if (!reasm_data)
+		goto err;
+
+	payload_len = ((skb->data - skb_network_header(skb)) -
 		       sizeof(struct ipv6hdr) + fq->q.len -
 		       sizeof(struct frag_hdr));
 	if (payload_len > IPV6_MAXPLEN) {
 		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
 				    payload_len);
-		return false;
-	}
-
-	/* Head of list must not be cloned. */
-	if (skb_unclone(head, GFP_ATOMIC))
-		return false;
-
-	/* If the first fragment is fragmented itself, we split
-	 * it to two chunks: the first with data and paged part
-	 * and the second, holding only fragments. */
-	if (skb_has_frag_list(head)) {
-		struct sk_buff *clone;
-		int i, plen = 0;
-
-		clone = alloc_skb(0, GFP_ATOMIC);
-		if (clone == NULL)
-			return false;
-
-		clone->next = head->next;
-		head->next = clone;
-		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-		skb_frag_list_init(head);
-		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-		clone->len = clone->data_len = head->data_len - plen;
-		head->data_len -= clone->len;
-		head->len -= clone->len;
-		clone->csum = 0;
-		clone->ip_summed = head->ip_summed;
-
-		add_frag_mem_limit(fq->q.net, clone->truesize);
-	}
-
-	/* morph head into last received skb: prev.
-	 *
-	 * This allows callers of ipv6 conntrack defrag to continue
-	 * to use the last skb(frag) passed into the reasm engine.
-	 * The last skb frag 'silently' turns into the full reassembled skb.
-	 *
-	 * Since prev is also part of q->fragments we have to clone it first.
-	 */
-	if (head != prev) {
-		struct sk_buff *iter;
-
-		fp = skb_clone(prev, GFP_ATOMIC);
-		if (!fp)
-			return false;
-
-		fp->next = prev->next;
-
-		iter = head;
-		while (iter) {
-			if (iter->next == prev) {
-				iter->next = fp;
-				break;
-			}
-			iter = iter->next;
-		}
-
-		skb_morph(prev, head);
-		prev->next = head->next;
-		consume_skb(head);
-		head = prev;
+		goto err;
 	}
 
 	/* We have to remove fragment header from datagram and to relocate
 	 * header in order to calculate ICV correctly. */
-	skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
-	memmove(head->head + sizeof(struct frag_hdr), head->head,
-		(head->data - head->head) - sizeof(struct frag_hdr));
-	head->mac_header += sizeof(struct frag_hdr);
-	head->network_header += sizeof(struct frag_hdr);
+	skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
+	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+		(skb->data - skb->head) - sizeof(struct frag_hdr));
+	skb->mac_header += sizeof(struct frag_hdr);
+	skb->network_header += sizeof(struct frag_hdr);
 
-	skb_shinfo(head)->frag_list = head->next;
-	skb_reset_transport_header(head);
-	skb_push(head, head->data - skb_network_header(head));
+	skb_reset_transport_header(skb);
 
-	for (fp = head->next; fp; fp = fp->next) {
-		head->data_len += fp->len;
-		head->len += fp->len;
-		if (head->ip_summed != fp->ip_summed)
-			head->ip_summed = CHECKSUM_NONE;
-		else if (head->ip_summed == CHECKSUM_COMPLETE)
-			head->csum = csum_add(head->csum, fp->csum);
-		head->truesize += fp->truesize;
-		fp->sk = NULL;
-	}
-	sub_frag_mem_limit(fq->q.net, head->truesize);
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
 
-	head->ignore_df = 1;
-	head->next = NULL;
-	head->dev = dev;
-	head->tstamp = fq->q.stamp;
-	ipv6_hdr(head)->payload_len = htons(payload_len);
-	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
-	IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
+	skb->ignore_df = 1;
+	skb->dev = dev;
+	ipv6_hdr(skb)->payload_len = htons(payload_len);
+	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+	IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
 
 	/* Yes, and fold redundant checksum back. 8) */
-	if (head->ip_summed == CHECKSUM_COMPLETE)
-		head->csum = csum_partial(skb_network_header(head),
-					  skb_network_header_len(head),
-					  head->csum);
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_partial(skb_network_header(skb),
+					 skb_network_header_len(skb),
+					 skb->csum);
 
 	fq->q.fragments = NULL;
 	fq->q.rb_fragments = RB_ROOT;
 	fq->q.fragments_tail = NULL;
+	fq->q.last_run_head = NULL;
 
-	return true;
+	return 0;
+
+err:
+	inet_frag_kill(&fq->q);
+	return -EINVAL;
 }
 
 /*
@@ -543,7 +442,6 @@
 int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 {
 	u16 savethdr = skb->transport_header;
-	struct net_device *dev = skb->dev;
 	int fhoff, nhoff, ret;
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;
@@ -566,10 +464,6 @@
 	hdr = ipv6_hdr(skb);
 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
 
-	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
-	    fhdr->frag_off & htons(IP6_MF))
-		return -EINVAL;
-
 	skb_orphan(skb);
 	fq = fq_find(net, fhdr->identification, user, hdr,
 		     skb->dev ? skb->dev->ifindex : 0);
@@ -581,24 +475,17 @@
 	spin_lock_bh(&fq->q.lock);
 
 	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
-	if (ret < 0) {
-		if (ret == -EPROTO) {
-			skb->transport_header = savethdr;
-			ret = 0;
-		}
-		goto out_unlock;
+	if (ret == -EPROTO) {
+		skb->transport_header = savethdr;
+		ret = 0;
 	}
 
 	/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
 	 * must be returned.
 	 */
-	ret = -EINPROGRESS;
-	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-	    fq->q.meat == fq->q.len &&
-	    nf_ct_frag6_reasm(fq, skb, dev))
-		ret = 0;
+	if (ret)
+		ret = -EINPROGRESS;
 
-out_unlock:
 	spin_unlock_bh(&fq->q.lock);
 	inet_frag_put(&fq->q);
 	return ret;
@@ -634,16 +521,24 @@
 	.exit = nf_ct_net_exit,
 };
 
+static const struct rhashtable_params nfct_rhash_params = {
+	.head_offset		= offsetof(struct inet_frag_queue, node),
+	.hashfn			= ip6frag_key_hashfn,
+	.obj_hashfn		= ip6frag_obj_hashfn,
+	.obj_cmpfn		= ip6frag_obj_cmpfn,
+	.automatic_shrinking	= true,
+};
+
 int nf_ct_frag6_init(void)
 {
 	int ret = 0;
 
-	nf_frags.constructor = ip6_frag_init;
+	nf_frags.constructor = ip6frag_init;
 	nf_frags.destructor = NULL;
 	nf_frags.qsize = sizeof(struct frag_queue);
 	nf_frags.frag_expire = nf_ct_frag6_expire;
 	nf_frags.frags_cache_name = nf_frags_cache_name;
-	nf_frags.rhash_params = ip6_rhash_params;
+	nf_frags.rhash_params = nfct_rhash_params;
 	ret = inet_frags_init(&nf_frags);
 	if (ret)
 		goto out;
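
The open-coded fragment list walk removed above, including the RFC 5722 overlap handling, now lives in the shared inet_frag_queue_insert() helper: it signals exact duplicates via IPFRAG_DUP, while any other insert error is treated as an overlap (note the IPSTATS_MIB_REASM_OVERLAPS bump in the reassembly.c hunk below). The policy itself, in a hedged sketch with a plain array standing in for the rbtree:

    #include <stddef.h>

    struct frag { int offset, end; };    /* [offset, end) byte range */

    /* 0 if the new range is insertable, -1 on any overlap; RFC 5722
     * requires discarding the whole datagram on overlap.
     */
    static int check_overlap(const struct frag *frags, size_t n,
                             int offset, int end)
    {
        for (size_t i = 0; i < n; i++)
            if (offset < frags[i].end && frags[i].offset < end)
                return -1;
        return 0;
    }
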
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index f06b047..c4070e9 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -14,8 +14,7 @@
 #include <linux/skbuff.h>
 #include <linux/icmp.h>
 #include <linux/sysctl.h>
-#include <net/ipv6.h>
-#include <net/inet_frag.h>
+#include <net/ipv6_frag.h>
 
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_bridge.h>
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 74ffbcb..4aed9c45 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -57,18 +57,11 @@
 #include <net/rawv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
-#include <net/inet_frag.h>
+#include <net/ipv6_frag.h>
 #include <net/inet_ecn.h>
 
 static const char ip6_frag_cache_name[] = "ip6-frags";
 
-struct ip6frag_skb_cb {
-	struct inet6_skb_parm	h;
-	int			offset;
-};
-
-#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))
-
 static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 {
 	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
@@ -76,63 +69,8 @@
 
 static struct inet_frags ip6_frags;
 
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
-			  struct net_device *dev);
-
-void ip6_frag_init(struct inet_frag_queue *q, const void *a)
-{
-	struct frag_queue *fq = container_of(q, struct frag_queue, q);
-	const struct frag_v6_compare_key *key = a;
-
-	q->key.v6 = *key;
-	fq->ecn = 0;
-}
-EXPORT_SYMBOL(ip6_frag_init);
-
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
-{
-	struct net_device *dev = NULL;
-	struct sk_buff *head;
-
-	rcu_read_lock();
-	spin_lock(&fq->q.lock);
-
-	if (fq->q.flags & INET_FRAG_COMPLETE)
-		goto out;
-
-	inet_frag_kill(&fq->q);
-
-	dev = dev_get_by_index_rcu(net, fq->iif);
-	if (!dev)
-		goto out;
-
-	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
-	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
-
-	/* Don't send error if the first segment did not arrive. */
-	head = fq->q.fragments;
-	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
-		goto out;
-
-	/* But use as source device on which LAST ARRIVED
-	 * segment was received. And do not use fq->dev
-	 * pointer directly, device might already disappeared.
-	 */
-	head->dev = dev;
-	skb_get(head);
-	spin_unlock(&fq->q.lock);
-
-	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
-	kfree_skb(head);
-	goto out_rcu_unlock;
-
-out:
-	spin_unlock(&fq->q.lock);
-out_rcu_unlock:
-	rcu_read_unlock();
-	inet_frag_put(&fq->q);
-}
-EXPORT_SYMBOL(ip6_expire_frag_queue);
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			  struct sk_buff *prev_tail, struct net_device *dev);
 
 static void ip6_frag_expire(unsigned long data)
 {
@@ -142,7 +80,7 @@
 	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
 	net = container_of(fq->q.net, struct net, ipv6.frags);
 
-	ip6_expire_frag_queue(net, fq);
+	ip6frag_expire_frag_queue(net, fq);
 }
 
 static struct frag_queue *
@@ -169,27 +107,29 @@
 }
 
 static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
-			   struct frag_hdr *fhdr, int nhoff)
+			  struct frag_hdr *fhdr, int nhoff,
+			  u32 *prob_offset)
 {
-	struct sk_buff *prev, *next;
-	struct net_device *dev;
-	int offset, end;
 	struct net *net = dev_net(skb_dst(skb)->dev);
+	int offset, end, fragsize;
+	struct sk_buff *prev_tail;
+	struct net_device *dev;
+	int err = -ENOENT;
 	u8 ecn;
 
 	if (fq->q.flags & INET_FRAG_COMPLETE)
 		goto err;
 
+	err = -EINVAL;
 	offset = ntohs(fhdr->frag_off) & ~0x7;
 	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
 			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 
 	if ((unsigned int)end > IPV6_MAXPLEN) {
-		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-				IPSTATS_MIB_INHDRERRORS);
-		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
-				  ((u8 *)&fhdr->frag_off -
-				   skb_network_header(skb)));
+		*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
+		/* note that if prob_offset is set, the skb is freed elsewhere;
+		 * we do not free it here.
+		 */
 		return -1;
 	}
 
@@ -209,7 +149,7 @@
 		 */
 		if (end < fq->q.len ||
 		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
-			goto err;
+			goto discard_fq;
 		fq->q.flags |= INET_FRAG_LAST_IN;
 		fq->q.len = end;
 	} else {
@@ -220,84 +160,51 @@
 			/* RFC2460 says always send parameter problem in
 			 * this case. -DaveM
 			 */
-			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-					IPSTATS_MIB_INHDRERRORS);
-			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
-					  offsetof(struct ipv6hdr, payload_len));
+			*prob_offset = offsetof(struct ipv6hdr, payload_len);
 			return -1;
 		}
 		if (end > fq->q.len) {
 			/* Some bits beyond end -> corruption. */
 			if (fq->q.flags & INET_FRAG_LAST_IN)
-				goto err;
+				goto discard_fq;
 			fq->q.len = end;
 		}
 	}
 
 	if (end == offset)
-		goto err;
+		goto discard_fq;
 
+	err = -ENOMEM;
 	/* Point into the IP datagram 'data' part. */
 	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
-		goto err;
-
-	if (pskb_trim_rcsum(skb, end - offset))
-		goto err;
-
-	/* Find out which fragments are in front and at the back of us
-	 * in the chain of fragments so far.  We must know where to put
-	 * this fragment, right?
-	 */
-	prev = fq->q.fragments_tail;
-	if (!prev || FRAG6_CB(prev)->offset < offset) {
-		next = NULL;
-		goto found;
-	}
-	prev = NULL;
-	for (next = fq->q.fragments; next != NULL; next = next->next) {
-		if (FRAG6_CB(next)->offset >= offset)
-			break;	/* bingo! */
-		prev = next;
-	}
-
-found:
-	/* RFC5722, Section 4, amended by Errata ID : 3089
-	 *                          When reassembling an IPv6 datagram, if
-	 *   one or more its constituent fragments is determined to be an
-	 *   overlapping fragment, the entire datagram (and any constituent
-	 *   fragments) MUST be silently discarded.
-	 */
-
-	/* Check for overlap with preceding fragment. */
-	if (prev &&
-	    (FRAG6_CB(prev)->offset + prev->len) > offset)
 		goto discard_fq;
 
-	/* Look for overlap with succeeding segment. */
-	if (next && FRAG6_CB(next)->offset < end)
+	err = pskb_trim_rcsum(skb, end - offset);
+	if (err)
 		goto discard_fq;
 
-	FRAG6_CB(skb)->offset = offset;
-
-	/* Insert this fragment in the chain of fragments. */
-	skb->next = next;
-	if (!next)
-		fq->q.fragments_tail = skb;
-	if (prev)
-		prev->next = skb;
-	else
-		fq->q.fragments = skb;
-
+	/* Note : skb->rbnode and skb->dev share the same location. */
 	dev = skb->dev;
-	if (dev) {
+	/* Makes sure compiler won't do silly aliasing games */
+	barrier();
+
+	prev_tail = fq->q.fragments_tail;
+	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+	if (err)
+		goto insert_error;
+
+	if (dev)
 		fq->iif = dev->ifindex;
-		skb->dev = NULL;
-	}
+
 	fq->q.stamp = skb->tstamp;
 	fq->q.meat += skb->len;
 	fq->ecn |= ecn;
 	add_frag_mem_limit(fq->q.net, skb->truesize);
 
+	fragsize = -skb_network_offset(skb) + skb->len;
+	if (fragsize > fq->q.max_size)
+		fq->q.max_size = fragsize;
+
 	/* The first fragment.
 	 * nhoffset is obtained from the first fragment, of course.
 	 */
@@ -308,44 +215,48 @@
 
 	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 	    fq->q.meat == fq->q.len) {
-		int res;
 		unsigned long orefdst = skb->_skb_refdst;
 
 		skb->_skb_refdst = 0UL;
-		res = ip6_frag_reasm(fq, prev, dev);
+		err = ip6_frag_reasm(fq, skb, prev_tail, dev);
 		skb->_skb_refdst = orefdst;
-		return res;
+		return err;
 	}
 
 	skb_dst_drop(skb);
-	return -1;
+	return -EINPROGRESS;
 
+insert_error:
+	if (err == IPFRAG_DUP) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+	err = -EINVAL;
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+			IPSTATS_MIB_REASM_OVERLAPS);
 discard_fq:
 	inet_frag_kill(&fq->q);
-err:
 	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			IPSTATS_MIB_REASMFAILS);
+err:
 	kfree_skb(skb);
-	return -1;
+	return err;
 }
 
 /*
  *	Check if this packet is complete.
- *	Returns NULL on failure by any reason, and pointer
- *	to current nexthdr field in reassembled frame.
  *
  *	It is called with locked fq, and caller must check that
  *	queue is eligible for reassembly i.e. it is not COMPLETE,
  *	the last and the first frames arrived and all the bits are here.
  */
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
-			  struct net_device *dev)
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			  struct sk_buff *prev_tail, struct net_device *dev)
 {
 	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
-	struct sk_buff *fp, *head = fq->q.fragments;
-	int    payload_len;
 	unsigned int nhoff;
-	int sum_truesize;
+	void *reasm_data;
+	int payload_len;
 	u8 ecn;
 
 	inet_frag_kill(&fq->q);
@@ -354,113 +265,40 @@
 	if (unlikely(ecn == 0xff))
 		goto out_fail;
 
-	/* Make the one we just received the head. */
-	if (prev) {
-		head = prev->next;
-		fp = skb_clone(head, GFP_ATOMIC);
+	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+	if (!reasm_data)
+		goto out_oom;
 
-		if (!fp)
-			goto out_oom;
-
-		fp->next = head->next;
-		if (!fp->next)
-			fq->q.fragments_tail = fp;
-		prev->next = fp;
-
-		skb_morph(head, fq->q.fragments);
-		head->next = fq->q.fragments->next;
-
-		consume_skb(fq->q.fragments);
-		fq->q.fragments = head;
-	}
-
-	WARN_ON(head == NULL);
-	WARN_ON(FRAG6_CB(head)->offset != 0);
-
-	/* Unfragmented part is taken from the first segment. */
-	payload_len = ((head->data - skb_network_header(head)) -
+	payload_len = ((skb->data - skb_network_header(skb)) -
 		       sizeof(struct ipv6hdr) + fq->q.len -
 		       sizeof(struct frag_hdr));
 	if (payload_len > IPV6_MAXPLEN)
 		goto out_oversize;
 
-	/* Head of list must not be cloned. */
-	if (skb_unclone(head, GFP_ATOMIC))
-		goto out_oom;
-
-	/* If the first fragment is fragmented itself, we split
-	 * it to two chunks: the first with data and paged part
-	 * and the second, holding only fragments. */
-	if (skb_has_frag_list(head)) {
-		struct sk_buff *clone;
-		int i, plen = 0;
-
-		clone = alloc_skb(0, GFP_ATOMIC);
-		if (!clone)
-			goto out_oom;
-		clone->next = head->next;
-		head->next = clone;
-		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-		skb_frag_list_init(head);
-		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-		clone->len = clone->data_len = head->data_len - plen;
-		head->data_len -= clone->len;
-		head->len -= clone->len;
-		clone->csum = 0;
-		clone->ip_summed = head->ip_summed;
-		add_frag_mem_limit(fq->q.net, clone->truesize);
-	}
-
 	/* We have to remove fragment header from datagram and to relocate
 	 * header in order to calculate ICV correctly. */
 	nhoff = fq->nhoffset;
-	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
-	memmove(head->head + sizeof(struct frag_hdr), head->head,
-		(head->data - head->head) - sizeof(struct frag_hdr));
-	if (skb_mac_header_was_set(head))
-		head->mac_header += sizeof(struct frag_hdr);
-	head->network_header += sizeof(struct frag_hdr);
+	skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
+	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+		(skb->data - skb->head) - sizeof(struct frag_hdr));
+	if (skb_mac_header_was_set(skb))
+		skb->mac_header += sizeof(struct frag_hdr);
+	skb->network_header += sizeof(struct frag_hdr);
 
-	skb_reset_transport_header(head);
-	skb_push(head, head->data - skb_network_header(head));
+	skb_reset_transport_header(skb);
 
-	sum_truesize = head->truesize;
-	for (fp = head->next; fp;) {
-		bool headstolen;
-		int delta;
-		struct sk_buff *next = fp->next;
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
 
-		sum_truesize += fp->truesize;
-		if (head->ip_summed != fp->ip_summed)
-			head->ip_summed = CHECKSUM_NONE;
-		else if (head->ip_summed == CHECKSUM_COMPLETE)
-			head->csum = csum_add(head->csum, fp->csum);
-
-		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
-			kfree_skb_partial(fp, headstolen);
-		} else {
-			if (!skb_shinfo(head)->frag_list)
-				skb_shinfo(head)->frag_list = fp;
-			head->data_len += fp->len;
-			head->len += fp->len;
-			head->truesize += fp->truesize;
-		}
-		fp = next;
-	}
-	sub_frag_mem_limit(fq->q.net, sum_truesize);
-
-	head->next = NULL;
-	head->dev = dev;
-	head->tstamp = fq->q.stamp;
-	ipv6_hdr(head)->payload_len = htons(payload_len);
-	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
-	IP6CB(head)->nhoff = nhoff;
-	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
+	skb->dev = dev;
+	ipv6_hdr(skb)->payload_len = htons(payload_len);
+	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+	IP6CB(skb)->nhoff = nhoff;
+	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+	IP6CB(skb)->frag_max_size = fq->q.max_size;
 
 	/* Yes, and fold redundant checksum back. 8) */
-	skb_postpush_rcsum(head, skb_network_header(head),
-			   skb_network_header_len(head));
+	skb_postpush_rcsum(skb, skb_network_header(skb),
+			   skb_network_header_len(skb));
 
 	rcu_read_lock();
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
@@ -468,6 +306,7 @@
 	fq->q.fragments = NULL;
 	fq->q.rb_fragments = RB_ROOT;
 	fq->q.fragments_tail = NULL;
+	fq->q.last_run_head = NULL;
 	return 1;
 
 out_oversize:
@@ -479,6 +318,7 @@
 	rcu_read_lock();
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 	rcu_read_unlock();
+	inet_frag_kill(&fq->q);
 	return -1;
 }
 
@@ -517,22 +357,26 @@
 		return 1;
 	}
 
-	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
-	    fhdr->frag_off & htons(IP6_MF))
-		goto fail_hdr;
-
 	iif = skb->dev ? skb->dev->ifindex : 0;
 	fq = fq_find(net, fhdr->identification, hdr, iif);
 	if (fq) {
+		u32 prob_offset = 0;
 		int ret;
 
 		spin_lock(&fq->q.lock);
 
 		fq->iif = iif;
-		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
+		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
+				     &prob_offset);
 
 		spin_unlock(&fq->q.lock);
 		inet_frag_put(&fq->q);
+		if (prob_offset) {
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+					IPSTATS_MIB_INHDRERRORS);
+			/* icmpv6_param_prob() calls kfree_skb(skb) */
+			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
+		}
 		return ret;
 	}
 
@@ -700,42 +544,19 @@
 	.exit = ipv6_frags_exit_net,
 };
 
-static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
-{
-	return jhash2(data,
-		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
-}
-
-static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
-{
-	const struct inet_frag_queue *fq = data;
-
-	return jhash2((const u32 *)&fq->key.v6,
-		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
-}
-
-static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
-{
-	const struct frag_v6_compare_key *key = arg->key;
-	const struct inet_frag_queue *fq = ptr;
-
-	return !!memcmp(&fq->key, key, sizeof(*key));
-}
-
-const struct rhashtable_params ip6_rhash_params = {
+static const struct rhashtable_params ip6_rhash_params = {
 	.head_offset		= offsetof(struct inet_frag_queue, node),
-	.hashfn			= ip6_key_hashfn,
-	.obj_hashfn		= ip6_obj_hashfn,
-	.obj_cmpfn		= ip6_obj_cmpfn,
+	.hashfn			= ip6frag_key_hashfn,
+	.obj_hashfn		= ip6frag_obj_hashfn,
+	.obj_cmpfn		= ip6frag_obj_cmpfn,
 	.automatic_shrinking	= true,
 };
-EXPORT_SYMBOL(ip6_rhash_params);
 
 int __init ipv6_frag_init(void)
 {
 	int ret;
 
-	ip6_frags.constructor = ip6_frag_init;
+	ip6_frags.constructor = ip6frag_init;
 	ip6_frags.destructor = NULL;
 	ip6_frags.qsize = sizeof(struct frag_queue);
 	ip6_frags.frag_expire = ip6_frag_expire;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 68c860d..4f808aa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3177,7 +3177,7 @@
 		table = rt->rt6i_table->tb6_id;
 	else
 		table = RT6_TABLE_UNSPEC;
-	rtm->rtm_table = table;
+	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
 	if (nla_put_u32(skb, RTA_TABLE, table))
 		goto nla_put_failure;
 	if (rt->rt6i_flags & RTF_REJECT) {
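
rtm_table is a single byte, so table ids of 256 or above cannot be represented in it; the hunk reports RT_TABLE_COMPAT in the legacy field while the full id still travels in the RTA_TABLE attribute emitted just below. A minimal sketch of the encoding:

    #include <stdint.h>

    #define RT_TABLE_COMPAT 252   /* as defined in <linux/rtnetlink.h> */

    static uint8_t encode_rtm_table(uint32_t table)
    {
        return table < 256 ? (uint8_t)table : RT_TABLE_COMPAT;
    }
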
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 75de3dd..47ca2a2 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -661,6 +661,10 @@
 		    !net_eq(tunnel->net, dev_net(tunnel->dev))))
 			goto out;
 
+		/* skb can be uncloned in iptunnel_pull_header, so
+		 * the old iph is no longer valid
+		 */
+		iph = (const struct iphdr *)skb_mac_header(skb);
 		err = IP_ECN_decapsulate(iph, skb);
 		if (unlikely(err)) {
 			if (log_ecn_error)
@@ -767,8 +771,9 @@
 		pbw0 = tunnel->ip6rd.prefixlen >> 5;
 		pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
 
-		d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
-		    tunnel->ip6rd.relay_prefixlen;
+		d = tunnel->ip6rd.relay_prefixlen < 32 ?
+			(ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
+		    tunnel->ip6rd.relay_prefixlen : 0;
 
 		pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
 		if (pbi1 > 0)
@@ -1064,7 +1069,7 @@
 	if (!tdev && tunnel->parms.link)
 		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
 
-	if (tdev) {
+	if (tdev && !netif_is_l3_master(tdev)) {
 		int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
 		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
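
The ip6rd hunk above fixes an undefined-behaviour shift: when relay_prefixlen is 32, the old expression shifted a 32-bit value right by 32 bits, which C leaves undefined, so that case is now pinned to 0. The guarded form in isolation, as a hedged sketch:

    #include <stdint.h>

    /* (v6word << pbi0) >> relay_prefixlen, with the UB case pinned to 0 */
    static uint32_t extract_delta(uint32_t v6word, unsigned int pbi0,
                                  unsigned int relay_prefixlen)
    {
        if (relay_prefixlen >= 32)
            return 0;    /* shifting a u32 by >= 32 bits is undefined */
        return (v6word << pbi0) >> relay_prefixlen;
    }
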
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ed5d635..3201aa2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1062,11 +1062,11 @@
 		newnp->ipv6_fl_list = NULL;
 		newnp->pktoptions  = NULL;
 		newnp->opt	   = NULL;
-		newnp->mcast_oif   = tcp_v6_iif(skb);
-		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
-		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
+		newnp->mcast_oif   = inet_iif(skb);
+		newnp->mcast_hops  = ip_hdr(skb)->ttl;
+		newnp->rcv_flowinfo = 0;
 		if (np->repflow)
-			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
+			newnp->flow_label = 0;
 
 		/*
 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 3a2701d..07b7b25 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -391,6 +391,10 @@
 	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
 	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
 	unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
+	/* Someone may still hold a reference to an xfrm6_tunnel_spi,
+	 * so wait for in-flight RCU callbacks before destroying the cache.
+	 */
+	rcu_barrier();
 	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
 }
 
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 553d0ad..2f3cd09 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -2058,14 +2058,14 @@
 	if (err)
 		goto fail;
 
-	err = sock_register(&kcm_family_ops);
-	if (err)
-		goto sock_register_fail;
-
 	err = register_pernet_device(&kcm_net_ops);
 	if (err)
 		goto net_ops_fail;
 
+	err = sock_register(&kcm_family_ops);
+	if (err)
+		goto sock_register_fail;
+
 	err = kcm_proc_init();
 	if (err)
 		goto proc_init_fail;
@@ -2073,12 +2073,12 @@
 	return 0;
 
 proc_init_fail:
-	unregister_pernet_device(&kcm_net_ops);
-
-net_ops_fail:
 	sock_unregister(PF_KCM);
 
 sock_register_fail:
+	unregister_pernet_device(&kcm_net_ops);
+
+net_ops_fail:
 	proto_unregister(&kcm_proto);
 
 fail:
@@ -2094,8 +2094,8 @@
 static void __exit kcm_exit(void)
 {
 	kcm_proc_exit();
-	unregister_pernet_device(&kcm_net_ops);
 	sock_unregister(PF_KCM);
+	unregister_pernet_device(&kcm_net_ops);
 	proto_unregister(&kcm_proto);
 	destroy_workqueue(kcm_wq);
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index c4b03d7..a007f11 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -680,9 +680,6 @@
 	if (flags & MSG_OOB)
 		goto out;
 
-	if (addr_len)
-		*addr_len = sizeof(*lsa);
-
 	if (flags & MSG_ERRQUEUE)
 		return ipv6_recv_error(sk, msg, len, addr_len);
 
@@ -712,6 +709,7 @@
 		lsa->l2tp_conn_id = 0;
 		if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
 			lsa->l2tp_scope_id = inet6_iif(skb);
+		*addr_len = sizeof(*lsa);
 	}
 
 	if (np->rxopt.all)
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 49c8a9c..1a169de 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1163,6 +1163,9 @@
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
 
+	if (local->in_reconfig)
+		return;
+
 	if (!check_sdata_in_driver(sdata))
 		return;
 
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5768560..ad03331 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1937,6 +1937,9 @@
 	list_del_rcu(&sdata->list);
 	mutex_unlock(&sdata->local->iflist_mtx);
 
+	if (sdata->vif.txq)
+		ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
+
 	synchronize_rcu();
 
 	if (sdata->dev) {
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 197753a..8c17d49 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -23,7 +23,7 @@
 static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
 {
 	/* Use last four bytes of hw addr as hash index */
-	return jhash_1word(*(u32 *)(addr+2), seed);
+	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
 }
 
 static const struct rhashtable_params mesh_rht_params = {
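
Hashing the hardware address reads a u32 at offset 2, which is not naturally aligned; __get_unaligned_cpu32() performs that access safely. The portable userspace equivalent is a memcpy-based load, sketched here:

    #include <stdint.h>
    #include <string.h>

    /* well-defined for any alignment; compilers lower this to a single
     * load on architectures that permit unaligned access
     */
    static uint32_t load_u32_unaligned(const void *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof(v));
        return v;
    }
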
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index fd186b0..8475e86 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1643,7 +1643,7 @@
 	if (!cp) {
 		int v;
 
-		if (!sysctl_schedule_icmp(ipvs))
+		if (ipip || !sysctl_schedule_icmp(ipvs))
 			return NF_ACCEPT;
 
 		if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 93820e0..4ee8acd 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -191,10 +191,6 @@
 		else if (d > 0)
 			parent = parent->rb_right;
 		else {
-			if (!nft_set_elem_active(&rbe->ext, genmask)) {
-				parent = parent->rb_left;
-				continue;
-			}
 			if (nft_rbtree_interval_end(rbe) &&
 			    !nft_rbtree_interval_end(this)) {
 				parent = parent->rb_left;
@@ -203,6 +199,9 @@
 				   nft_rbtree_interval_end(this)) {
 				parent = parent->rb_right;
 				continue;
+			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+				parent = parent->rb_left;
+				continue;
 			}
 			nft_set_elem_change_active(net, set, &rbe->ext);
 			return rbe;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 751fec7..e065140 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1728,7 +1728,7 @@
 		seqcount_init(&per_cpu(xt_recseq, i));
 	}
 
-	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
+	xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
 	if (!xt)
 		return -ENOMEM;
 
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index bb33598..ec247d83 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -96,8 +96,7 @@
 static int physdev_mt_check(const struct xt_mtchk_param *par)
 {
 	const struct xt_physdev_info *info = par->matchinfo;
-
-	br_netfilter_enable();
+	static bool brnf_probed __read_mostly;
 
 	if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
 	    info->bitmask & ~XT_PHYSDEV_OP_MASK)
@@ -113,6 +112,12 @@
 		if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
 			return -EINVAL;
 	}
+
+	if (!brnf_probed) {
+		brnf_probed = true;
+		request_module("br_netfilter");
+	}
+
 	return 0;
 }
 
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index f135814..02d6f38 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -23,6 +23,7 @@
 #include <net/netfilter/nf_conntrack_seqadj.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#include <net/ipv6_frag.h>
 
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <linux/netfilter/nf_nat.h>
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 3bd4d5d..50ea761 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1853,14 +1853,14 @@
 
 	struct sw_flow_actions *acts;
 	int new_acts_size;
-	int req_size = NLA_ALIGN(attr_len);
+	size_t req_size = NLA_ALIGN(attr_len);
 	int next_offset = offsetof(struct sw_flow_actions, actions) +
 					(*sfa)->actions_len;
 
 	if (req_size <= (ksize(*sfa) - next_offset))
 		goto out;
 
-	new_acts_size = ksize(*sfa) * 2;
+	new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
 
 	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
 		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
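
Doubling ksize(*sfa) alone can still come up short when a single large attribute arrives, so the hunk grows the buffer to at least next_offset + req_size. The rule in isolation, as a hedged sketch:

    #include <stddef.h>

    static size_t grow_actions(size_t cur_size, size_t next_offset,
                               size_t req_size)
    {
        size_t need    = next_offset + req_size;  /* what this request takes */
        size_t doubled = cur_size * 2;            /* the old heuristic */

        return doubled > need ? doubled : need;   /* max() of the two */
    }
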
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6542eff..7233ad8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2638,8 +2638,8 @@
 	void *ph;
 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
+	unsigned char *addr = NULL;
 	int tp_len, size_max;
-	unsigned char *addr;
 	void *data;
 	int len_sum = 0;
 	int status = TP_STATUS_AVAILABLE;
@@ -2650,7 +2650,6 @@
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
-		addr	= NULL;
 	} else {
 		err = -EINVAL;
 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2660,10 +2659,13 @@
 						sll_addr)))
 			goto out;
 		proto	= saddr->sll_protocol;
-		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
-		if (addr && dev && saddr->sll_halen < dev->addr_len)
-			goto out_put;
+		if (po->sk.sk_socket->type == SOCK_DGRAM) {
+			if (dev && msg->msg_namelen < dev->addr_len +
+				   offsetof(struct sockaddr_ll, sll_addr))
+				goto out_put;
+			addr = saddr->sll_addr;
+		}
 	}
 
 	err = -ENXIO;
@@ -2834,7 +2836,7 @@
 	struct sk_buff *skb;
 	struct net_device *dev;
 	__be16 proto;
-	unsigned char *addr;
+	unsigned char *addr = NULL;
 	int err, reserve = 0;
 	struct sockcm_cookie sockc;
 	struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2851,7 +2853,6 @@
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
-		addr	= NULL;
 	} else {
 		err = -EINVAL;
 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2859,10 +2860,13 @@
 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
 			goto out;
 		proto	= saddr->sll_protocol;
-		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
-		if (addr && dev && saddr->sll_halen < dev->addr_len)
-			goto out_unlock;
+		if (sock->type == SOCK_DGRAM) {
+			if (dev && msg->msg_namelen < dev->addr_len +
+				   offsetof(struct sockaddr_ll, sll_addr))
+				goto out_unlock;
+			addr = saddr->sll_addr;
+		}
 	}
 
 	err = -ENXIO;
@@ -3278,7 +3282,7 @@
 	}
 
 	mutex_lock(&net->packet.sklist_lock);
-	sk_add_node_rcu(sk, &net->packet.sklist);
+	sk_add_node_tail_rcu(sk, &net->packet.sklist);
 	mutex_unlock(&net->packet.sklist_lock);
 
 	preempt_disable();
@@ -4229,7 +4233,7 @@
 	struct pgv *pg_vec;
 	int i;
 
-	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
+	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
 	if (unlikely(!pg_vec))
 		goto out;
 
@@ -4620,14 +4624,29 @@
 
 static int __init packet_init(void)
 {
-	int rc = proto_register(&packet_proto, 0);
+	int rc;
 
-	if (rc != 0)
+	rc = proto_register(&packet_proto, 0);
+	if (rc)
 		goto out;
+	rc = sock_register(&packet_family_ops);
+	if (rc)
+		goto out_proto;
+	rc = register_pernet_subsys(&packet_net_ops);
+	if (rc)
+		goto out_sock;
+	rc = register_netdevice_notifier(&packet_netdev_notifier);
+	if (rc)
+		goto out_pernet;
 
-	sock_register(&packet_family_ops);
-	register_pernet_subsys(&packet_net_ops);
-	register_netdevice_notifier(&packet_netdev_notifier);
+	return 0;
+
+out_pernet:
+	unregister_pernet_subsys(&packet_net_ops);
+out_sock:
+	sock_unregister(PF_PACKET);
+out_proto:
+	proto_unregister(&packet_proto);
 out:
 	return rc;
 }
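
packet_init() previously ignored the return values of the later registrations; the hunk gives every step a matching unwind label, torn down in reverse order when a subsequent step fails. The shape of that pattern, as a self-contained hedged sketch with stub registrations standing in for proto_register()/sock_register()/register_pernet_subsys():

    /* stubs for illustration; a real failure path would return -errno */
    static int register_a(void) { return 0; }
    static int register_b(void) { return 0; }
    static int register_c(void) { return 0; }
    static void unregister_a(void) { }
    static void unregister_b(void) { }

    static int subsystem_init(void)
    {
        int rc;

        rc = register_a();
        if (rc)
            goto out;
        rc = register_b();
        if (rc)
            goto out_a;
        rc = register_c();
        if (rc)
            goto out_b;
        return 0;

    out_b:            /* undo everything after the failing step, */
        unregister_b();
    out_a:            /* in the reverse order of registration    */
        unregister_a();
    out:
        return rc;
    }

    int main(void) { return subsystem_init(); }
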
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 850a86c..f6aa532 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -131,7 +131,7 @@
 	ph->utid = 0;
 	ph->message_id = id;
 	ph->pipe_handle = pn->pipe_handle;
-	ph->data[0] = code;
+	ph->error_code = code;
 	return pn_skb_send(sk, skb, NULL);
 }
 
@@ -152,7 +152,7 @@
 	ph->utid = id; /* whatever */
 	ph->message_id = id;
 	ph->pipe_handle = pn->pipe_handle;
-	ph->data[0] = code;
+	ph->error_code = code;
 	return pn_skb_send(sk, skb, NULL);
 }
 
@@ -207,7 +207,7 @@
 	struct pnpipehdr *ph;
 	struct sockaddr_pn dst;
 	u8 data[4] = {
-		oph->data[0], /* PEP type */
+		oph->pep_type, /* PEP type */
 		code, /* error code, at an unusual offset */
 		PAD, PAD,
 	};
@@ -220,7 +220,7 @@
 	ph->utid = oph->utid;
 	ph->message_id = PNS_PEP_CTRL_RESP;
 	ph->pipe_handle = oph->pipe_handle;
-	ph->data[0] = oph->data[1]; /* CTRL id */
+	ph->data0 = oph->data[0]; /* CTRL id */
 
 	pn_skb_get_src_sockaddr(oskb, &dst);
 	return pn_skb_send(sk, skb, &dst);
@@ -271,17 +271,17 @@
 		return -EINVAL;
 
 	hdr = pnp_hdr(skb);
-	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
+	if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
 		net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
-				    (unsigned int)hdr->data[0]);
+				    (unsigned int)hdr->pep_type);
 		return -EOPNOTSUPP;
 	}
 
-	switch (hdr->data[1]) {
+	switch (hdr->data[0]) {
 	case PN_PEP_IND_FLOW_CONTROL:
 		switch (pn->tx_fc) {
 		case PN_LEGACY_FLOW_CONTROL:
-			switch (hdr->data[4]) {
+			switch (hdr->data[3]) {
 			case PEP_IND_BUSY:
 				atomic_set(&pn->tx_credits, 0);
 				break;
@@ -291,7 +291,7 @@
 			}
 			break;
 		case PN_ONE_CREDIT_FLOW_CONTROL:
-			if (hdr->data[4] == PEP_IND_READY)
+			if (hdr->data[3] == PEP_IND_READY)
 				atomic_set(&pn->tx_credits, wake = 1);
 			break;
 		}
@@ -300,12 +300,12 @@
 	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
 		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
 			break;
-		atomic_add(wake = hdr->data[4], &pn->tx_credits);
+		atomic_add(wake = hdr->data[3], &pn->tx_credits);
 		break;
 
 	default:
 		net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
-				    (unsigned int)hdr->data[1]);
+				    (unsigned int)hdr->data[0]);
 		return -EOPNOTSUPP;
 	}
 	if (wake)
@@ -317,7 +317,7 @@
 {
 	struct pep_sock *pn = pep_sk(sk);
 	struct pnpipehdr *hdr = pnp_hdr(skb);
-	u8 n_sb = hdr->data[0];
+	u8 n_sb = hdr->data0;
 
 	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 	__skb_pull(skb, sizeof(*hdr));
@@ -505,7 +505,7 @@
 		return -ECONNREFUSED;
 
 	/* Parse sub-blocks */
-	n_sb = hdr->data[4];
+	n_sb = hdr->data[3];
 	while (n_sb > 0) {
 		u8 type, buf[6], len = sizeof(buf);
 		const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -738,7 +738,7 @@
 	ph->utid = 0;
 	ph->message_id = PNS_PIPE_REMOVE_REQ;
 	ph->pipe_handle = pn->pipe_handle;
-	ph->data[0] = PAD;
+	ph->data0 = PAD;
 	return pn_skb_send(sk, skb, NULL);
 }
 
@@ -815,7 +815,7 @@
 	peer_type = hdr->other_pep_type << 8;
 
 	/* Parse sub-blocks (options) */
-	n_sb = hdr->data[4];
+	n_sb = hdr->data[3];
 	while (n_sb > 0) {
 		u8 type, buf[1], len = sizeof(buf);
 		const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -1106,7 +1106,7 @@
 	ph->utid = 0;
 	if (pn->aligned) {
 		ph->message_id = PNS_PIPE_ALIGNED_DATA;
-		ph->data[0] = 0; /* padding */
+		ph->data0 = 0; /* padding */
 	} else
 		ph->message_id = PNS_PIPE_DATA;
 	ph->pipe_handle = pn->pipe_handle;
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index 4fe8f4f..da84d6b 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -44,6 +44,17 @@
 	else
 		pool = rds_ibdev->mr_1m_pool;
 
+	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+	/* Switch pools if one of the pools is reaching its upper limit */
+	if (atomic_read(&pool->dirty_count) >=  pool->max_items * 9 / 10) {
+		if (pool->pool_type == RDS_IB_MR_8K_POOL)
+			pool = rds_ibdev->mr_1m_pool;
+		else
+			pool = rds_ibdev->mr_8k_pool;
+	}
+
 	ibmr = rds_ib_try_reuse_ibmr(pool);
 	if (ibmr)
 		return ibmr;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 977f698..91b53d4 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -442,9 +442,6 @@
 	struct rds_ib_mr *ibmr = NULL;
 	int iter = 0;
 
-	if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
-		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
 	while (1) {
 		ibmr = rds_ib_reuse_mr(pool);
 		if (ibmr)
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index d36effb..2daba53 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -527,7 +527,7 @@
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
-		if (net != c_net || !tc->t_sock)
+		if (net != c_net)
 			continue;
 		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
 			list_move_tail(&tc->t_tcp_node, &tmp_list);
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 7ca5774..7849f28 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -105,16 +105,17 @@
 	struct sk_buff *skb;
 	unsigned char  *dptr;
 	unsigned char  lci1, lci2;
-	char buffer[100];
-	int len, faclen = 0;
+	int maxfaclen = 0;
+	int len, faclen;
+	int reserve;
 
-	len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
+	reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
+	len = ROSE_MIN_LEN;
 
 	switch (frametype) {
 	case ROSE_CALL_REQUEST:
 		len   += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
-		faclen = rose_create_facilities(buffer, rose);
-		len   += faclen;
+		maxfaclen = 256;
 		break;
 	case ROSE_CALL_ACCEPTED:
 	case ROSE_CLEAR_REQUEST:
@@ -123,15 +124,16 @@
 		break;
 	}
 
-	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+	skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
+	if (!skb)
 		return;
 
 	/*
 	 *	Space for AX.25 header and PID.
 	 */
-	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
+	skb_reserve(skb, reserve);
 
-	dptr = skb_put(skb, skb_tailroom(skb));
+	dptr = skb_put(skb, len);
 
 	lci1 = (rose->lci >> 8) & 0x0F;
 	lci2 = (rose->lci >> 0) & 0xFF;
@@ -146,7 +148,8 @@
 		dptr   += ROSE_ADDR_LEN;
 		memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
 		dptr   += ROSE_ADDR_LEN;
-		memcpy(dptr, buffer, faclen);
+		faclen = rose_create_facilities(dptr, rose);
+		skb_put(skb, faclen);
 		dptr   += faclen;
 		break;
 
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 60ef960..0fce919 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -355,7 +355,7 @@
 	 * normally have to take channel_lock but we do this before anyone else
 	 * can see the connection.
 	 */
-	list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
+	list_add(&call->chan_wait_link, &candidate->waiting_calls);
 
 	if (cp->exclusive) {
 		call->conn = candidate;
@@ -430,7 +430,7 @@
 	spin_lock(&conn->channel_lock);
 	call->conn = conn;
 	call->security_ix = conn->security_ix;
-	list_add(&call->chan_wait_link, &conn->waiting_calls);
+	list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
 	spin_unlock(&conn->channel_lock);
 	_leave(" = 0 [extant %d]", conn->debug_id);
 	return 0;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index a309a07..909848f 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -63,6 +63,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/sched/loadavg.h>
 #include <linux/string.h>
 #include <linux/skbuff.h>
 #include <linux/random.h>
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 8ea8217..d6af93a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -600,6 +600,7 @@
 static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
 {
 	/* No address mapping for V4 sockets */
+	memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
 	return sizeof(struct sockaddr_in);
 }
 
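
The added memset closes a kernel-to-user infoleak: sin_zero is pure padding, and without clearing it the stack bytes underneath get copied out along with the address. The general rule is to zero a struct's padding before it can reach userspace; a sketch with a hypothetical helper, kernel types assumed:

	static int fill_user_addr(struct sockaddr_in *out, __be16 port, __be32 addr)
	{
		memset(out, 0, sizeof(*out));	/* clears sin_zero padding too */
		out->sin_family = AF_INET;
		out->sin_port = port;
		out->sin_addr.s_addr = addr;
		return sizeof(*out);		/* length later reported to userspace */
	}
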
diff --git a/net/socket.c b/net/socket.c
index a523ac3..cff34b8 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -615,6 +615,7 @@
 		if (inode)
 			inode_lock(inode);
 		sock->ops->release(sock);
+		sock->sk = NULL;
 		if (inode)
 			inode_unlock(inode);
 		sock->ops = NULL;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index cab50ec..cdcc0fe 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,6 +54,7 @@
 	h->last_refresh = now;
 }
 
+static inline int cache_is_valid(struct cache_head *h);
 static void cache_fresh_locked(struct cache_head *head, time_t expiry,
 				struct cache_detail *detail);
 static void cache_fresh_unlocked(struct cache_head *head,
@@ -100,6 +101,8 @@
 			if (cache_is_expired(detail, tmp)) {
 				hlist_del_init(&tmp->cache_list);
 				detail->entries --;
+				if (cache_is_valid(tmp) == -EAGAIN)
+					set_bit(CACHE_NEGATIVE, &tmp->flags);
 				cache_fresh_locked(tmp, 0, detail);
 				freeme = tmp;
 				break;
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 236b043..9746941 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -62,6 +62,10 @@
 	INIT_LIST_HEAD(&tn->node_list);
 	spin_lock_init(&tn->node_list_lock);
 
+	err = tipc_socket_init();
+	if (err)
+		goto out_socket;
+
 	err = tipc_sk_rht_init(net);
 	if (err)
 		goto out_sk_rht;
@@ -88,6 +92,8 @@
 out_nametbl:
 	tipc_sk_rht_destroy(net);
 out_sk_rht:
+	tipc_socket_stop();
+out_socket:
 	return err;
 }
 
@@ -98,6 +104,7 @@
 	tipc_bcast_stop(net);
 	tipc_nametbl_stop(net);
 	tipc_sk_rht_destroy(net);
+	tipc_socket_stop();
 }
 
 static struct pernet_operations tipc_net_ops = {
@@ -125,10 +132,6 @@
 	if (err)
 		goto out_netlink_compat;
 
-	err = tipc_socket_init();
-	if (err)
-		goto out_socket;
-
 	err = tipc_register_sysctl();
 	if (err)
 		goto out_sysctl;
@@ -148,8 +151,6 @@
 out_pernet:
 	tipc_unregister_sysctl();
 out_sysctl:
-	tipc_socket_stop();
-out_socket:
 	tipc_netlink_compat_stop();
 out_netlink_compat:
 	tipc_netlink_stop();
@@ -164,7 +165,6 @@
 	unregister_pernet_subsys(&tipc_net_ops);
 	tipc_netlink_stop();
 	tipc_netlink_compat_stop();
-	tipc_socket_stop();
 	tipc_unregister_sysctl();
 
 	pr_info("Deactivated\n");
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index d947b82..0cf9403 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -262,8 +262,14 @@
 	if (msg->rep_type)
 		tipc_tlv_init(msg->rep, msg->rep_type);
 
-	if (cmd->header)
-		(*cmd->header)(msg);
+	if (cmd->header) {
+		err = (*cmd->header)(msg);
+		if (err) {
+			kfree_skb(msg->rep);
+			msg->rep = NULL;
+			return err;
+		}
+	}
 
 	arg = nlmsg_new(0, GFP_KERNEL);
 	if (!arg) {
@@ -388,7 +394,12 @@
 	if (!bearer)
 		return -EMSGSIZE;
 
-	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+	len = TLV_GET_DATA_LEN(msg->req);
+	len -= offsetof(struct tipc_bearer_config, name);
+	if (len <= 0)
+		return -EINVAL;
+
+	len = min_t(int, len, TIPC_MAX_BEARER_NAME);
 	if (!string_is_valid(b->name, len))
 		return -EINVAL;
 
@@ -757,7 +768,12 @@
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
-	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	len = TLV_GET_DATA_LEN(msg->req);
+	len -= offsetof(struct tipc_link_config, name);
+	if (len <= 0)
+		return -EINVAL;
+
+	len = min_t(int, len, TIPC_MAX_LINK_NAME);
 	if (!string_is_valid(lc->name, len))
 		return -EINVAL;
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index a3df5e1..f0c2197 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -891,7 +891,7 @@
 	addr->hash ^= sk->sk_type;
 
 	__unix_remove_socket(sk);
-	u->addr = addr;
+	smp_store_release(&u->addr, addr);
 	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
 	spin_unlock(&unix_table_lock);
 	err = 0;
@@ -1061,7 +1061,7 @@
 
 	err = 0;
 	__unix_remove_socket(sk);
-	u->addr = addr;
+	smp_store_release(&u->addr, addr);
 	__unix_insert_socket(list, sk);
 
 out_unlock:
@@ -1332,15 +1332,29 @@
 	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
 	otheru = unix_sk(other);
 
-	/* copy address information from listening to new sock*/
-	if (otheru->addr) {
-		atomic_inc(&otheru->addr->refcnt);
-		newu->addr = otheru->addr;
-	}
+	/* copy address information from listening to new sock
+	 *
+	 * The contents of *(otheru->addr) and otheru->path
+	 * are seen fully set up here, since we have found
+	 * otheru in hash under unix_table_lock.  Insertion
+	 * into the hash chain we'd found it in had been done
+	 * in an earlier critical section protected by unix_table_lock,
+	 * the same one where we'd set *(otheru->addr) contents,
+	 * as well as otheru->path and otheru->addr itself.
+	 *
+	 * Using smp_store_release() here to set newu->addr
+	 * is enough to make those stores, as well as stores
+	 * to newu->path visible to anyone who gets newu->addr
+	 * by smp_load_acquire().  IOW, the same guarantees
+	 * as for unix_sock instances bound in unix_bind() or
+	 * in unix_autobind().
+	 */
 	if (otheru->path.dentry) {
 		path_get(&otheru->path);
 		newu->path = otheru->path;
 	}
+	atomic_inc(&otheru->addr->refcnt);
+	smp_store_release(&newu->addr, otheru->addr);
 
 	/* Set credentials */
 	copy_peercred(sk, other);
@@ -1453,7 +1467,7 @@
 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
 {
 	struct sock *sk = sock->sk;
-	struct unix_sock *u;
+	struct unix_address *addr;
 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
 	int err = 0;
 
@@ -1468,19 +1482,15 @@
 		sock_hold(sk);
 	}
 
-	u = unix_sk(sk);
-	unix_state_lock(sk);
-	if (!u->addr) {
+	addr = smp_load_acquire(&unix_sk(sk)->addr);
+	if (!addr) {
 		sunaddr->sun_family = AF_UNIX;
 		sunaddr->sun_path[0] = 0;
 		*uaddr_len = sizeof(short);
 	} else {
-		struct unix_address *addr = u->addr;
-
 		*uaddr_len = addr->len;
 		memcpy(sunaddr, addr->name, *uaddr_len);
 	}
-	unix_state_unlock(sk);
 	sock_put(sk);
 out:
 	return err;
@@ -2094,11 +2104,11 @@
 
 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
 {
-	struct unix_sock *u = unix_sk(sk);
+	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
 
-	if (u->addr) {
-		msg->msg_namelen = u->addr->len;
-		memcpy(msg->msg_name, u->addr->name, u->addr->len);
+	if (addr) {
+		msg->msg_namelen = addr->len;
+		memcpy(msg->msg_name, addr->name, addr->len);
 	}
 }
 
@@ -2814,7 +2824,7 @@
 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
 			sock_i_ino(s));
 
-		if (u->addr) {
+		if (u->addr) {	// under unix_table_lock here
 			int i, len;
 			seq_putc(seq, ' ');
 
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e..3183d9b 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
 
 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
 {
-	struct unix_address *addr = unix_sk(sk)->addr;
+	/* might or might not have unix_table_lock */
+	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
 
 	if (!addr)
 		return 0;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index f66a601..0bd5a60 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -600,28 +600,27 @@
 	if (!virtio_vsock_workqueue)
 		return -ENOMEM;
 
-	ret = register_virtio_driver(&virtio_vsock_driver);
+	ret = vsock_core_init(&virtio_transport.transport);
 	if (ret)
 		goto out_wq;
 
-	ret = vsock_core_init(&virtio_transport.transport);
+	ret = register_virtio_driver(&virtio_vsock_driver);
 	if (ret)
-		goto out_vdr;
+		goto out_vci;
 
 	return 0;
 
-out_vdr:
-	unregister_virtio_driver(&virtio_vsock_driver);
+out_vci:
+	vsock_core_exit();
 out_wq:
 	destroy_workqueue(virtio_vsock_workqueue);
 	return ret;
-
 }
 
 static void __exit virtio_vsock_exit(void)
 {
-	vsock_core_exit();
 	unregister_virtio_driver(&virtio_vsock_driver);
+	vsock_core_exit();
 	destroy_workqueue(virtio_vsock_workqueue);
 }
 
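
The reordering above restores the usual rule: module init builds up passive state before registering anything that can trigger callbacks, and exit tears down in exactly the reverse order. A generic sketch of that shape; core_init/register_driver are hypothetical names, not this driver's API:

	static int __init mymod_init(void)
	{
		int ret;

		ret = core_init();			/* passive state first */
		if (ret)
			return ret;

		ret = register_driver(&my_driver);	/* callbacks can fire from here on */
		if (ret)
			goto out_core;
		return 0;

	out_core:
		core_exit();				/* unwind in reverse order */
		return ret;
	}

	static void __exit mymod_exit(void)
	{
		unregister_driver(&my_driver);		/* stop new callbacks first */
		core_exit();
	}
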
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 9c07c76..aa9d1c7 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -601,6 +601,8 @@
  */
 static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
 {
+	const struct virtio_transport *t;
+	struct virtio_vsock_pkt *reply;
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_RST,
 		.type = le16_to_cpu(pkt->hdr.type),
@@ -611,15 +613,21 @@
 	if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
 		return 0;
 
-	pkt = virtio_transport_alloc_pkt(&info, 0,
-					 le64_to_cpu(pkt->hdr.dst_cid),
-					 le32_to_cpu(pkt->hdr.dst_port),
-					 le64_to_cpu(pkt->hdr.src_cid),
-					 le32_to_cpu(pkt->hdr.src_port));
-	if (!pkt)
+	reply = virtio_transport_alloc_pkt(&info, 0,
+					   le64_to_cpu(pkt->hdr.dst_cid),
+					   le32_to_cpu(pkt->hdr.dst_port),
+					   le64_to_cpu(pkt->hdr.src_cid),
+					   le32_to_cpu(pkt->hdr.src_port));
+	if (!reply)
 		return -ENOMEM;
 
-	return virtio_transport_get_ops()->send_pkt(pkt);
+	t = virtio_transport_get_ops();
+	if (!t) {
+		virtio_transport_free_pkt(reply);
+		return -ENOTCONN;
+	}
+
+	return t->send_pkt(reply);
 }
 
 static void virtio_transport_wait_close(struct sock *sk, long timeout)
@@ -717,12 +725,19 @@
 
 void virtio_transport_release(struct vsock_sock *vsk)
 {
+	struct virtio_vsock_sock *vvs = vsk->trans;
+	struct virtio_vsock_pkt *pkt, *tmp;
 	struct sock *sk = &vsk->sk;
 	bool remove_sock = true;
 
 	lock_sock(sk);
 	if (sk->sk_type == SOCK_STREAM)
 		remove_sock = virtio_transport_close(vsk);
+
+	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
 	release_sock(sk);
 
 	if (remove_sock)
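
Two things happen in the reset hunk above: the incoming pkt is no longer clobbered by the allocated response (a distinct reply keeps ownership obvious), and the transport ops pointer is checked before use, with the reply freed on the bail-out path so nothing leaks. The ownership shape, reduced to a sketch with hypothetical names:

	static int send_reset_reply(const struct request *req)
	{
		struct reply *reply = alloc_reply(req);	/* we own 'reply' now */

		if (!reply)
			return -ENOMEM;

		if (!transport_ready()) {		/* cannot hand it off */
			free_reply(reply);		/* so we must free it */
			return -ENOTCONN;
		}

		return transport_send(reply);		/* ownership transferred */
	}
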
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
old mode 100644
new mode 100755
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 0a7e5d9..770abab 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -678,8 +678,7 @@
 	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
 	int len, i, rc = 0;
 
-	if (!sock_flag(sk, SOCK_ZAPPED) ||
-	    addr_len != sizeof(struct sockaddr_x25) ||
+	if (addr_len != sizeof(struct sockaddr_x25) ||
 	    addr->sx25_family != AF_X25) {
 		rc = -EINVAL;
 		goto out;
@@ -694,9 +693,13 @@
 	}
 
 	lock_sock(sk);
-	x25_sk(sk)->source_addr = addr->sx25_addr;
-	x25_insert_socket(sk);
-	sock_reset_flag(sk, SOCK_ZAPPED);
+	if (sock_flag(sk, SOCK_ZAPPED)) {
+		x25_sk(sk)->source_addr = addr->sx25_addr;
+		x25_insert_socket(sk);
+		sock_reset_flag(sk, SOCK_ZAPPED);
+	} else {
+		rc = -EINVAL;
+	}
 	release_sock(sk);
 	SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
 out:
@@ -812,8 +815,13 @@
 	sock->state = SS_CONNECTED;
 	rc = 0;
 out_put_neigh:
-	if (rc)
+	if (rc) {
+		read_lock_bh(&x25_list_lock);
 		x25_neigh_put(x25->neighbour);
+		x25->neighbour = NULL;
+		read_unlock_bh(&x25_list_lock);
+		x25->state = X25_STATE_0;
+	}
 out_put_route:
 	x25_route_put(rt);
 out:
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index f54bed8..7145c0c 100755
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1397,7 +1397,7 @@
 	ret = verify_policy_dir(p->dir);
 	if (ret)
 		return ret;
-	if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
+	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
 		return -EINVAL;
 
 	return 0;
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 512cec5..f40431f 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -197,9 +197,7 @@
 
 # ld-option
 # Usage: LDFLAGS += $(call ld-option, -X)
-ld-option = $(call try-run,\
-	$(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -x c /dev/null -c -o "$$TMPO"; \
-	$(LD) $(LDFLAGS) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
+ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2))
 
 # ar-option
 # Usage: KBUILD_ARFLAGS := $(call ar-option,D)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 409662f..c141b05 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5874,6 +5874,32 @@
 			}
 		}
 
+		# check for vsprintf extension %p<foo> misuses
+		if ($^V && $^V ge 5.10.0 &&
+		    defined $stat &&
+		    $stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s &&
+		    $1 !~ /^_*volatile_*$/) {
+			my $bad_extension = "";
+			my $lc = $stat =~ tr@\n@@;
+			$lc = $lc + $linenr;
+			for (my $count = $linenr; $count <= $lc; $count++) {
+				my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
+				$fmt =~ s/%%//g;
+				if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNOx]).)/) {
+					$bad_extension = $1;
+					last;
+				}
+			}
+			if ($bad_extension ne "") {
+				my $stat_real = raw_line($linenr, 0);
+				for (my $count = $linenr + 1; $count <= $lc; $count++) {
+					$stat_real = $stat_real . "\n" . raw_line($count, 0);
+				}
+				WARN("VSPRINTF_POINTER_EXTENSION",
+				     "Invalid vsprintf pointer extension '$bad_extension'\n" . "$here\n$stat_real\n");
+			}
+		}
+
 # Check for misused memsets
 		if ($^V && $^V ge 5.10.0 &&
 		    defined $stat &&
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index d58de1d..510049a 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -126,7 +126,8 @@
 			case KEY_DOWN:
 				break;
 			case KEY_BACKSPACE:
-			case 127:
+			case 8:   /* ^H */
+			case 127: /* ^? */
 				if (pos) {
 					wattrset(dialog, dlg.inputbox.atr);
 					if (input_x == 0) {
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index d42d534..f7049e2 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -1046,7 +1046,7 @@
 		state->match_direction = FIND_NEXT_MATCH_UP;
 		*ans = get_mext_match(state->pattern,
 				state->match_direction);
-	} else if (key == KEY_BACKSPACE || key == 127) {
+	} else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
 		state->pattern[strlen(state->pattern)-1] = '\0';
 		adj_match_dir(&state->match_direction);
 	} else
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 4b2f44c..9a65035 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -439,7 +439,8 @@
 		case KEY_F(F_EXIT):
 		case KEY_F(F_BACK):
 			break;
-		case 127:
+		case 8:   /* ^H */
+		case 127: /* ^? */
 		case KEY_BACKSPACE:
 			if (cursor_position > 0) {
 				memmove(&result[cursor_position-1],
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 29d6699..55b4c0d 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -47,49 +47,9 @@
 struct devtable {
 	const char *device_id; /* name of table, __mod_<name>__*_device_table. */
 	unsigned long id_size;
-	void *function;
+	int (*do_entry)(const char *filename, void *symval, char *alias);
 };
 
-#define ___cat(a,b) a ## b
-#define __cat(a,b) ___cat(a,b)
-
-/* we need some special handling for this host tool running eventually on
- * Darwin. The Mach-O section handling is a bit different than ELF section
- * handling. The differnces in detail are:
- *  a) we have segments which have sections
- *  b) we need a API call to get the respective section symbols */
-#if defined(__MACH__)
-#include <mach-o/getsect.h>
-
-#define INIT_SECTION(name)  do {					\
-		unsigned long name ## _len;				\
-		char *__cat(pstart_,name) = getsectdata("__TEXT",	\
-			#name, &__cat(name,_len));			\
-		char *__cat(pstop_,name) = __cat(pstart_,name) +	\
-			__cat(name, _len);				\
-		__cat(__start_,name) = (void *)__cat(pstart_,name);	\
-		__cat(__stop_,name) = (void *)__cat(pstop_,name);	\
-	} while (0)
-#define SECTION(name)   __attribute__((section("__TEXT, " #name)))
-
-struct devtable **__start___devtable, **__stop___devtable;
-#else
-#define INIT_SECTION(name) /* no-op for ELF */
-#define SECTION(name)   __attribute__((section(#name)))
-
-/* We construct a table of pointers in an ELF section (pointers generally
- * go unpadded by gcc).  ld creates boundary syms for us. */
-extern struct devtable *__start___devtable[], *__stop___devtable[];
-#endif /* __MACH__ */
-
-#if !defined(__used)
-# if __GNUC__ == 3 && __GNUC_MINOR__ < 3
-#  define __used			__attribute__((__unused__))
-# else
-#  define __used			__attribute__((__used__))
-# endif
-#endif
-
 /* Define a variable f that holds the value of field f of struct devid
  * based at address m.
  */
@@ -102,16 +62,6 @@
 #define DEF_FIELD_ADDR(m, devid, f) \
 	typeof(((struct devid *)0)->f) *f = ((m) + OFF_##devid##_##f)
 
-/* Add a table entry.  We test function type matches while we're here. */
-#define ADD_TO_DEVTABLE(device_id, type, function) \
-	static struct devtable __cat(devtable,__LINE__) = {	\
-		device_id + 0*sizeof((function)((const char *)NULL,	\
-						(void *)NULL,		\
-						(char *)NULL)),		\
-		SIZE_##type, (function) };				\
-	static struct devtable *SECTION(__devtable) __used \
-		__cat(devtable_ptr,__LINE__) = &__cat(devtable,__LINE__)
-
 #define ADD(str, sep, cond, field)                              \
 do {                                                            \
         strcat(str, sep);                                       \
@@ -431,7 +381,6 @@
 
 	return 1;
 }
-ADD_TO_DEVTABLE("hid", hid_device_id, do_hid_entry);
 
 /* Looks like: ieee1394:venNmoNspNverN */
 static int do_ieee1394_entry(const char *filename,
@@ -456,7 +405,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("ieee1394", ieee1394_device_id, do_ieee1394_entry);
 
 /* Looks like: pci:vNdNsvNsdNbcNscNiN. */
 static int do_pci_entry(const char *filename,
@@ -500,7 +448,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("pci", pci_device_id, do_pci_entry);
 
 /* looks like: "ccw:tNmNdtNdmN" */
 static int do_ccw_entry(const char *filename,
@@ -524,7 +471,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("ccw", ccw_device_id, do_ccw_entry);
 
 /* looks like: "ap:tN" */
 static int do_ap_entry(const char *filename,
@@ -535,7 +481,6 @@
 	sprintf(alias, "ap:t%02X*", dev_type);
 	return 1;
 }
-ADD_TO_DEVTABLE("ap", ap_device_id, do_ap_entry);
 
 /* looks like: "css:tN" */
 static int do_css_entry(const char *filename,
@@ -546,7 +491,6 @@
 	sprintf(alias, "css:t%01X", type);
 	return 1;
 }
-ADD_TO_DEVTABLE("css", css_device_id, do_css_entry);
 
 /* Looks like: "serio:tyNprNidNexN" */
 static int do_serio_entry(const char *filename,
@@ -566,7 +510,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
 
 /* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
  *             "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
@@ -604,7 +547,6 @@
 	}
 	return 1;
 }
-ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
 
 /* looks like: "pnp:dD" */
 static void do_pnp_device_entry(void *symval, unsigned long size,
@@ -725,7 +667,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
 
 static int do_vio_entry(const char *filename, void *symval,
 		char *alias)
@@ -745,7 +686,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("vio", vio_device_id, do_vio_entry);
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
@@ -818,7 +758,6 @@
 		do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
 	return 1;
 }
-ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
 
 static int do_eisa_entry(const char *filename, void *symval,
 		char *alias)
@@ -830,7 +769,6 @@
 		strcat(alias, "*");
 	return 1;
 }
-ADD_TO_DEVTABLE("eisa", eisa_device_id, do_eisa_entry);
 
 /* Looks like: parisc:tNhvNrevNsvN */
 static int do_parisc_entry(const char *filename, void *symval,
@@ -850,7 +788,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("parisc", parisc_device_id, do_parisc_entry);
 
 /* Looks like: sdio:cNvNdN. */
 static int do_sdio_entry(const char *filename,
@@ -867,7 +804,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("sdio", sdio_device_id, do_sdio_entry);
 
 /* Looks like: ssb:vNidNrevN. */
 static int do_ssb_entry(const char *filename,
@@ -884,7 +820,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("ssb", ssb_device_id, do_ssb_entry);
 
 /* Looks like: bcma:mNidNrevNclN. */
 static int do_bcma_entry(const char *filename,
@@ -903,7 +838,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("bcma", bcma_device_id, do_bcma_entry);
 
 /* Looks like: virtio:dNvN */
 static int do_virtio_entry(const char *filename, void *symval,
@@ -919,7 +853,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("virtio", virtio_device_id, do_virtio_entry);
 
 /*
  * Looks like: vmbus:guid
@@ -942,7 +875,6 @@
 
 	return 1;
 }
-ADD_TO_DEVTABLE("vmbus", hv_vmbus_device_id, do_vmbus_entry);
 
 /* Looks like: i2c:S */
 static int do_i2c_entry(const char *filename, void *symval,
@@ -953,7 +885,6 @@
 
 	return 1;
 }
-ADD_TO_DEVTABLE("i2c", i2c_device_id, do_i2c_entry);
 
 /* Looks like: spi:S */
 static int do_spi_entry(const char *filename, void *symval,
@@ -964,7 +895,6 @@
 
 	return 1;
 }
-ADD_TO_DEVTABLE("spi", spi_device_id, do_spi_entry);
 
 static const struct dmifield {
 	const char *prefix;
@@ -1019,7 +949,6 @@
 	strcat(alias, ":");
 	return 1;
 }
-ADD_TO_DEVTABLE("dmi", dmi_system_id, do_dmi_entry);
 
 static int do_platform_entry(const char *filename,
 			     void *symval, char *alias)
@@ -1028,7 +957,6 @@
 	sprintf(alias, PLATFORM_MODULE_PREFIX "%s", *name);
 	return 1;
 }
-ADD_TO_DEVTABLE("platform", platform_device_id, do_platform_entry);
 
 static int do_mdio_entry(const char *filename,
 			 void *symval, char *alias)
@@ -1053,7 +981,6 @@
 
 	return 1;
 }
-ADD_TO_DEVTABLE("mdio", mdio_device_id, do_mdio_entry);
 
 /* Looks like: zorro:iN. */
 static int do_zorro_entry(const char *filename, void *symval,
@@ -1064,7 +991,6 @@
 	ADD(alias, "i", id != ZORRO_WILDCARD, id);
 	return 1;
 }
-ADD_TO_DEVTABLE("zorro", zorro_device_id, do_zorro_entry);
 
 /* looks like: "pnp:dD" */
 static int do_isapnp_entry(const char *filename,
@@ -1080,7 +1006,6 @@
 		(function >> 12) & 0x0f, (function >> 8) & 0x0f);
 	return 1;
 }
-ADD_TO_DEVTABLE("isapnp", isapnp_device_id, do_isapnp_entry);
 
 /* Looks like: "ipack:fNvNdN". */
 static int do_ipack_entry(const char *filename,
@@ -1096,7 +1021,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("ipack", ipack_device_id, do_ipack_entry);
 
 /*
  * Append a match expression for a single masked hex digit.
@@ -1167,7 +1091,6 @@
 
 	return 1;
 }
-ADD_TO_DEVTABLE("amba", amba_id, do_amba_entry);
 
 /*
  * looks like: "mipscdmm:tN"
@@ -1183,7 +1106,6 @@
 	sprintf(alias, "mipscdmm:t%02X*", type);
 	return 1;
 }
-ADD_TO_DEVTABLE("mipscdmm", mips_cdmm_device_id, do_mips_cdmm_entry);
 
 /* LOOKS like cpu:type:x86,venVVVVfamFFFFmodMMMM:feature:*,FEAT,*
  * All fields are numbers. It would be nicer to use strings for vendor
@@ -1208,7 +1130,6 @@
 		sprintf(alias + strlen(alias), "%04X*", feature);
 	return 1;
 }
-ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
 
 /* LOOKS like cpu:type:*:feature:*FEAT* */
 static int do_cpu_entry(const char *filename, void *symval, char *alias)
@@ -1218,7 +1139,6 @@
 	sprintf(alias, "cpu:type:*:feature:*%04X*", feature);
 	return 1;
 }
-ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
 
 /* Looks like: mei:S:uuid:N:* */
 static int do_mei_entry(const char *filename, void *symval,
@@ -1237,7 +1157,6 @@
 
 	return 1;
 }
-ADD_TO_DEVTABLE("mei", mei_cl_device_id, do_mei_entry);
 
 /* Looks like: rapidio:vNdNavNadN */
 static int do_rio_entry(const char *filename,
@@ -1257,7 +1176,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("rapidio", rio_device_id, do_rio_entry);
 
 /* Looks like: ulpi:vNpN */
 static int do_ulpi_entry(const char *filename, void *symval,
@@ -1270,7 +1188,6 @@
 
 	return 1;
 }
-ADD_TO_DEVTABLE("ulpi", ulpi_device_id, do_ulpi_entry);
 
 /* Looks like: hdaudio:vNrNaN */
 static int do_hda_entry(const char *filename, void *symval, char *alias)
@@ -1287,7 +1204,6 @@
 	add_wildcard(alias);
 	return 1;
 }
-ADD_TO_DEVTABLE("hdaudio", hda_device_id, do_hda_entry);
 
 /* Looks like: fsl-mc:vNdN */
 static int do_fsl_mc_entry(const char *filename, void *symval,
@@ -1299,7 +1215,6 @@
 	sprintf(alias, "fsl-mc:v%08Xd%s", vendor, *obj_type);
 	return 1;
 }
-ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry);
 
 /* Does namelen bytes of name exactly match the symbol? */
 static bool sym_is(const char *name, unsigned namelen, const char *symbol)
@@ -1313,12 +1228,11 @@
 static void do_table(void *symval, unsigned long size,
 		     unsigned long id_size,
 		     const char *device_id,
-		     void *function,
+		     int (*do_entry)(const char *filename, void *symval, char *alias),
 		     struct module *mod)
 {
 	unsigned int i;
 	char alias[500];
-	int (*do_entry)(const char *, void *entry, char *alias) = function;
 
 	device_id_check(mod->name, device_id, size, id_size, symval);
 	/* Leave last one: it's the terminator. */
@@ -1332,6 +1246,44 @@
 	}
 }
 
+static const struct devtable devtable[] = {
+	{"hid", SIZE_hid_device_id, do_hid_entry},
+	{"ieee1394", SIZE_ieee1394_device_id, do_ieee1394_entry},
+	{"pci", SIZE_pci_device_id, do_pci_entry},
+	{"ccw", SIZE_ccw_device_id, do_ccw_entry},
+	{"ap", SIZE_ap_device_id, do_ap_entry},
+	{"css", SIZE_css_device_id, do_css_entry},
+	{"serio", SIZE_serio_device_id, do_serio_entry},
+	{"acpi", SIZE_acpi_device_id, do_acpi_entry},
+	{"pcmcia", SIZE_pcmcia_device_id, do_pcmcia_entry},
+	{"vio", SIZE_vio_device_id, do_vio_entry},
+	{"input", SIZE_input_device_id, do_input_entry},
+	{"eisa", SIZE_eisa_device_id, do_eisa_entry},
+	{"parisc", SIZE_parisc_device_id, do_parisc_entry},
+	{"sdio", SIZE_sdio_device_id, do_sdio_entry},
+	{"ssb", SIZE_ssb_device_id, do_ssb_entry},
+	{"bcma", SIZE_bcma_device_id, do_bcma_entry},
+	{"virtio", SIZE_virtio_device_id, do_virtio_entry},
+	{"vmbus", SIZE_hv_vmbus_device_id, do_vmbus_entry},
+	{"i2c", SIZE_i2c_device_id, do_i2c_entry},
+	{"spi", SIZE_spi_device_id, do_spi_entry},
+	{"dmi", SIZE_dmi_system_id, do_dmi_entry},
+	{"platform", SIZE_platform_device_id, do_platform_entry},
+	{"mdio", SIZE_mdio_device_id, do_mdio_entry},
+	{"zorro", SIZE_zorro_device_id, do_zorro_entry},
+	{"isapnp", SIZE_isapnp_device_id, do_isapnp_entry},
+	{"ipack", SIZE_ipack_device_id, do_ipack_entry},
+	{"amba", SIZE_amba_id, do_amba_entry},
+	{"mipscdmm", SIZE_mips_cdmm_device_id, do_mips_cdmm_entry},
+	{"x86cpu", SIZE_x86_cpu_id, do_x86cpu_entry},
+	{"cpu", SIZE_cpu_feature, do_cpu_entry},
+	{"mei", SIZE_mei_cl_device_id, do_mei_entry},
+	{"rapidio", SIZE_rio_device_id, do_rio_entry},
+	{"ulpi", SIZE_ulpi_device_id, do_ulpi_entry},
+	{"hdaudio", SIZE_hda_device_id, do_hda_entry},
+	{"fslmc", SIZE_fsl_mc_device_id, do_fsl_mc_entry},
+};
+
 /* Create MODULE_ALIAS() statements.
  * At this time, we cannot write the actual output C source yet,
  * so we write into the mod->dev_table_buf buffer. */
@@ -1386,13 +1338,14 @@
 	else if (sym_is(name, namelen, "pnp_card"))
 		do_pnp_card_entries(symval, sym->st_size, mod);
 	else {
-		struct devtable **p;
-		INIT_SECTION(__devtable);
+		int i;
 
-		for (p = __start___devtable; p < __stop___devtable; p++) {
-			if (sym_is(name, namelen, (*p)->device_id)) {
-				do_table(symval, sym->st_size, (*p)->id_size,
-					 (*p)->device_id, (*p)->function, mod);
+		for (i = 0; i < ARRAY_SIZE(devtable); i++) {
+			const struct devtable *p = &devtable[i];
+
+			if (sym_is(name, namelen, p->device_id)) {
+				do_table(symval, sym->st_size, p->id_size,
+					 p->device_id, p->do_entry, mod);
 				break;
 			}
 		}
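
The net effect of the file2alias hunks is replacing linker-section magic (ADD_TO_DEVTABLE emitting pointers into a custom __devtable section, with Mach-O special cases for Darwin hosts) by an ordinary static array walked with a typed function pointer, which any host compiler can build. The dispatch shape, as a standalone userspace sketch with hypothetical handlers:

	#include <string.h>

	struct handler {
		const char *key;
		int (*fn)(const char *arg);
	};

	static int handle_pci(const char *arg) { return 0; }	/* hypothetical */
	static int handle_usb(const char *arg) { return 0; }	/* hypothetical */

	static const struct handler handlers[] = {
		{ "pci", handle_pci },
		{ "usb", handle_usb },
	};

	#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

	static int dispatch(const char *key, const char *arg)
	{
		size_t i;

		for (i = 0; i < ARRAY_SIZE(handlers); i++)
			if (!strcmp(handlers[i].key, key))
				return handlers[i].fn(arg);
		return -1;	/* unknown key: caller decides how to report it */
	}
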
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 03c1652..db3bdc9 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -568,7 +568,7 @@
 		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
 			rc = dev_exception_add(devcg, ex);
 			if (rc)
-				break;
+				return rc;
 		} else {
 			/*
 			 * in the other possible cases:
diff --git a/security/keys/proc.c b/security/keys/proc.c
index ec493dd..f2c7e09 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -187,7 +187,7 @@
 
 	struct keyring_search_context ctx = {
 		.index_key		= key->index_key,
-		.cred			= current_cred(),
+		.cred			= m->file->f_cred,
 		.match_data.cmp		= lookup_user_key_possessed,
 		.match_data.raw_data	= key,
 		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
@@ -207,11 +207,7 @@
 		}
 	}
 
-	/* check whether the current task is allowed to view the key (assuming
-	 * non-possession)
-	 * - the caller holds a spinlock, and thus the RCU read lock, making our
-	 *   access to __current_cred() safe
-	 */
+	/* check whether the current task is allowed to view the key */
 	rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
 	if (rc < 0)
 		return 0;
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 37f04da..44a20c2 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -321,6 +321,7 @@
 		if (a->u.net->sk) {
 			struct sock *sk = a->u.net->sk;
 			struct unix_sock *u;
+			struct unix_address *addr;
 			int len = 0;
 			char *p = NULL;
 
@@ -351,14 +352,15 @@
 #endif
 			case AF_UNIX:
 				u = unix_sk(sk);
+				addr = smp_load_acquire(&u->addr);
+				if (!addr)
+					break;
 				if (u->path.dentry) {
 					audit_log_d_path(ab, " path=", &u->path);
 					break;
 				}
-				if (!u->addr)
-					break;
-				len = u->addr->len-sizeof(short);
-				p = &u->addr->name->sun_path[0];
+				len = addr->len-sizeof(short);
+				p = &addr->name->sun_path[0];
 				audit_log_format(ab, " path=");
 				if (*p)
 					audit_log_untrustedstring(ab, p);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 423a350..5adb50f 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -468,22 +468,44 @@
 	return rc;
 }
 
-static int selinux_is_sblabel_mnt(struct super_block *sb)
+static int selinux_is_genfs_special_handling(struct super_block *sb)
 {
-	struct superblock_security_struct *sbsec = sb->s_security;
-
-	return sbsec->behavior == SECURITY_FS_USE_XATTR ||
-		sbsec->behavior == SECURITY_FS_USE_TRANS ||
-		sbsec->behavior == SECURITY_FS_USE_TASK ||
-		sbsec->behavior == SECURITY_FS_USE_NATIVE ||
-		/* Special handling. Genfs but also in-core setxattr handler */
-		!strcmp(sb->s_type->name, "sysfs") ||
+	/* Special handling. Genfs but also in-core setxattr handler */
+	return	!strcmp(sb->s_type->name, "sysfs") ||
 		!strcmp(sb->s_type->name, "pstore") ||
 		!strcmp(sb->s_type->name, "debugfs") ||
 		!strcmp(sb->s_type->name, "tracefs") ||
 		!strcmp(sb->s_type->name, "rootfs");
 }
 
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+	struct superblock_security_struct *sbsec = sb->s_security;
+
+	/*
+	 * IMPORTANT: Double-check logic in this function when adding a new
+	 * SECURITY_FS_USE_* definition!
+	 */
+	BUILD_BUG_ON(SECURITY_FS_USE_MAX != 7);
+
+	switch (sbsec->behavior) {
+	case SECURITY_FS_USE_XATTR:
+	case SECURITY_FS_USE_TRANS:
+	case SECURITY_FS_USE_TASK:
+	case SECURITY_FS_USE_NATIVE:
+		return 1;
+
+	case SECURITY_FS_USE_GENFS:
+		return selinux_is_genfs_special_handling(sb);
+
+	/* Never allow relabeling on context mounts */
+	case SECURITY_FS_USE_MNTPOINT:
+	case SECURITY_FS_USE_NONE:
+	default:
+		return 0;
+	}
+}
+
 static int sb_finish_set_opts(struct super_block *sb)
 {
 	struct superblock_security_struct *sbsec = sb->s_security;
@@ -3284,12 +3306,16 @@
 				     const void *value, size_t size, int flags)
 {
 	struct inode_security_struct *isec = inode_security_novalidate(inode);
+	struct superblock_security_struct *sbsec = inode->i_sb->s_security;
 	u32 newsid;
 	int rc;
 
 	if (strcmp(name, XATTR_SELINUX_SUFFIX))
 		return -EOPNOTSUPP;
 
+	if (!(sbsec->flags & SBLABEL_MNT))
+		return -EOPNOTSUPP;
+
 	if (!value || !size)
 		return -EACCES;
 
@@ -6003,7 +6029,10 @@
  */
 static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
 {
-	return selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX, ctx, ctxlen, 0);
+	int rc = selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX,
+					   ctx, ctxlen, 0);
+	/* Do not return error when suppressing label (SBLABEL_MNT not set). */
+	return rc == -EOPNOTSUPP ? 0 : rc;
 }
 
 /*
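
The BUILD_BUG_ON(SECURITY_FS_USE_MAX != 7) in the sblabel hunk is a compile-time tripwire: adding an eighth SECURITY_FS_USE_* behavior breaks the build until someone re-audits the switch. The guard generalizes to any enum-plus-switch pair; an illustrative sketch, assuming the kernel's BUILD_BUG_ON from <linux/build_bug.h>:

	enum fruit { APPLE, PEAR, FRUIT_MAX };

	static const char *fruit_name(enum fruit f)
	{
		/* Fails to compile if the enum grows without updating this switch. */
		BUILD_BUG_ON(FRUIT_MAX != 2);

		switch (f) {
		case APPLE:	return "apple";
		case PEAR:	return "pear";
		default:	return "unknown";
		}
	}
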
diff --git a/sound/core/info.c b/sound/core/info.c
index 08558ae..d092d84 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -823,7 +823,12 @@
 	list_for_each_entry_safe(p, n, &entry->children, list)
 		snd_info_free_entry(p);
 
-	list_del(&entry->list);
+	p = entry->parent;
+	if (p) {
+		mutex_lock(&p->access);
+		list_del(&entry->list);
+		mutex_unlock(&p->access);
+	}
 	kfree(entry->name);
 	if (entry->private_free)
 		entry->private_free(entry);
diff --git a/sound/core/init.c b/sound/core/init.c
index c816648..f49f507 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -452,14 +452,7 @@
 	card->shutdown = 1;
 	spin_unlock(&card->files_lock);
 
-	/* phase 1: disable fops (user space) operations for ALSA API */
-	mutex_lock(&snd_card_mutex);
-	snd_cards[card->number] = NULL;
-	clear_bit(card->number, snd_cards_lock);
-	mutex_unlock(&snd_card_mutex);
-	
-	/* phase 2: replace file->f_op with special dummy operations */
-	
+	/* replace file->f_op with special dummy operations */
 	spin_lock(&card->files_lock);
 	list_for_each_entry(mfile, &card->files_list, list) {
 		/* it's critical part, use endless loop */
@@ -475,7 +468,7 @@
 	}
 	spin_unlock(&card->files_lock);	
 
-	/* phase 3: notify all connected devices about disconnection */
+	/* notify all connected devices about disconnection */
 	/* at this point, they cannot respond to any calls except release() */
 
 #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -491,6 +484,13 @@
 		device_del(&card->card_dev);
 		card->registered = false;
 	}
+
+	/* disable fops (user space) operations for ALSA API */
+	mutex_lock(&snd_card_mutex);
+	snd_cards[card->number] = NULL;
+	clear_bit(card->number, snd_cards_lock);
+	mutex_unlock(&snd_card_mutex);
+
 #ifdef CONFIG_PM
 	wake_up(&card->power_sleep);
 #endif
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index cfb8f58..8240975 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -951,6 +951,28 @@
 	oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
 			 params_channels(params) / 8;
 
+	err = snd_pcm_oss_period_size(substream, params, sparams);
+	if (err < 0)
+		goto failure;
+
+	n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
+	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
+	if (err < 0)
+		goto failure;
+
+	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
+				     runtime->oss.periods, NULL);
+	if (err < 0)
+		goto failure;
+
+	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+
+	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
+	if (err < 0) {
+		pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
+		goto failure;
+	}
+
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
 	snd_pcm_oss_plugin_clear(substream);
 	if (!direct) {
@@ -985,27 +1007,6 @@
 	}
 #endif
 
-	err = snd_pcm_oss_period_size(substream, params, sparams);
-	if (err < 0)
-		goto failure;
-
-	n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
-	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
-	if (err < 0)
-		goto failure;
-
-	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
-				     runtime->oss.periods, NULL);
-	if (err < 0)
-		goto failure;
-
-	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
-
-	if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
-		pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
-		goto failure;
-	}
-
 	if (runtime->oss.trigger) {
 		sw_params->start_threshold = 1;
 	} else {
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
old mode 100644
new mode 100755
index a6fdf3a..0a747df
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1282,8 +1282,15 @@
 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+	switch (runtime->status->state) {
+	case SNDRV_PCM_STATE_SUSPENDED:
 		return -EBUSY;
+	/* unresumable PCM state; return -EBUSY for skipping suspend */
+	case SNDRV_PCM_STATE_OPEN:
+	case SNDRV_PCM_STATE_SETUP:
+	case SNDRV_PCM_STATE_DISCONNECTED:
+		return -EBUSY;
+	}
 	runtime->trigger_master = substream;
 	return 0;
 }
@@ -1363,6 +1370,14 @@
 			/* FIXME: the open/close code should lock this as well */
 			if (substream->runtime == NULL)
 				continue;
+
+			/*
+			 * Skip BE dai link PCM's that are internal and may
+			 * not have their substream ops set.
+			 */
+			if (!substream->ops)
+				continue;
+
 			err = snd_pcm_suspend(substream);
 			if (err < 0 && err != -EBUSY)
 				return err;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index f217a1d..73a07da 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/nospec.h>
 #include <sound/rawmidi.h>
 #include <sound/info.h>
 #include <sound/control.h>
@@ -592,6 +593,7 @@
 		return -ENXIO;
 	if (info->stream < 0 || info->stream > 1)
 		return -EINVAL;
+	info->stream = array_index_nospec(info->stream, 2);
 	pstr = &rmidi->streams[info->stream];
 	if (pstr->substream_count == 0)
 		return -ENOENT;
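
array_index_nospec() is the standard Spectre-v1 hardening: after the architectural range check, it clamps the index so the CPU cannot speculatively dereference an out-of-bounds value. The pattern in general form; the array here is a hypothetical stand-in for rmidi->streams:

	#include <linux/nospec.h>

	#define NR_SLOTS 8
	static int slots[NR_SLOTS];

	static int lookup(unsigned int idx)
	{
		if (idx >= NR_SLOTS)			/* architectural bounds check */
			return -EINVAL;
		idx = array_index_nospec(idx, NR_SLOTS);	/* clamp under speculation */
		return slots[idx];
	}
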
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 278ebb9..c939459 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -617,13 +617,14 @@
 snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
 {
 	struct seq_oss_synth *rec;
+	struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
 
-	if (dev < 0 || dev >= dp->max_synthdev)
+	if (!info)
 		return -ENXIO;
 
-	if (dp->synths[dev].is_midi) {
+	if (info->is_midi) {
 		struct midi_info minf;
-		snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
+		snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
 		inf->synth_type = SYNTH_TYPE_MIDI;
 		inf->synth_subtype = 0;
 		inf->nr_voices = 16;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 965473d..09491b2 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1249,7 +1249,7 @@
 
 	/* fill the info fields */
 	if (client_info->name[0])
-		strlcpy(client->name, client_info->name, sizeof(client->name));
+		strscpy(client->name, client_info->name, sizeof(client->name));
 
 	client->filter = client_info->filter;
 	client->event_lost = client_info->event_lost;
@@ -1527,7 +1527,7 @@
 	/* set queue name */
 	if (!info->name[0])
 		snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
-	strlcpy(q->name, info->name, sizeof(q->name));
+	strscpy(q->name, info->name, sizeof(q->name));
 	snd_use_lock_free(&q->use_lock);
 
 	return 0;
@@ -1589,7 +1589,7 @@
 		queuefree(q);
 		return -EPERM;
 	}
-	strlcpy(q->name, info->name, sizeof(q->name));
+	strscpy(q->name, info->name, sizeof(q->name));
 	queuefree(q);
 
 	return 0;
diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
index a371c07..e267025 100644
--- a/sound/drivers/opl3/opl3_voice.h
+++ b/sound/drivers/opl3/opl3_voice.h
@@ -41,7 +41,7 @@
 
 /* Prototypes for opl3_drums.c */
 void snd_opl3_load_drums(struct snd_opl3 *opl3);
-void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
+void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);
 
 /* Prototypes for opl3_oss.c */
 #ifdef CONFIG_SND_SEQUENCER_OSS
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 3b4eaff..a205b93 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -474,7 +474,19 @@
 	/* Focusrite, SaffirePro 26 I/O */
 	SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
 	/* Focusrite, SaffirePro 10 I/O */
-	SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec),
+	{
+		// The combination of vendor_id and model_id is the same as
+		// the one of Liquid Saffire 56.
+		.match_flags	= IEEE1394_MATCH_VENDOR_ID |
+				  IEEE1394_MATCH_MODEL_ID |
+				  IEEE1394_MATCH_SPECIFIER_ID |
+				  IEEE1394_MATCH_VERSION,
+		.vendor_id	= VEN_FOCUSRITE,
+		.model_id	= 0x000006,
+		.specifier_id	= 0x00a02d,
+		.version	= 0x010001,
+		.driver_data	= (kernel_ulong_t)&saffirepro_10_spec,
+	},
 	/* Focusrite, Saffire(no label and LE) */
 	SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
 			    &saffire_spec),
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index ad42d23..e75bfc5 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -111,6 +111,10 @@
 
 	/* block the 0x388 port to avoid PnP conflicts */
 	acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
+	if (!acard->fm_res) {
+		err = -EBUSY;
+		goto _err;
+	}
 
 	if (port[dev] != SNDRV_AUTO_PORT) {
 		if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 286f5e3..d73ee11a 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1953,6 +1953,11 @@
 	}
 	chip->dsp_registers = (volatile u32 __iomem *)
 		ioremap_nocache(chip->dsp_registers_phys, sz);
+	if (!chip->dsp_registers) {
+		dev_err(chip->card->dev, "ioremap failed\n");
+		snd_echo_free(chip);
+		return -ENOMEM;
+	}
 
 	if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
 			KBUILD_MODNAME, chip)) {
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index c6b046d..1b5e217 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3004,6 +3004,7 @@
 		hda_jackpoll_work(&codec->jackpoll_work.work);
 	else
 		snd_hda_jack_report_sync(codec);
+	codec->core.dev.power.power_state = PMSG_ON;
 	atomic_dec(&codec->core.in_pm);
 }
 
@@ -3036,10 +3037,62 @@
 }
 #endif /* CONFIG_PM */
 
+#ifdef CONFIG_PM_SLEEP
+static int hda_codec_force_resume(struct device *dev)
+{
+	int ret;
+
+	/* The get/put pair below enforces a runtime resume even if the
+	 * device hasn't been used at suspend time.  This trick is needed
+	 * to pick up any jack state change that happened during sleep.
+	 */
+	pm_runtime_get_noresume(dev);
+	ret = pm_runtime_force_resume(dev);
+	pm_runtime_put(dev);
+	return ret;
+}
+
+static int hda_codec_pm_suspend(struct device *dev)
+{
+	dev->power.power_state = PMSG_SUSPEND;
+	return pm_runtime_force_suspend(dev);
+}
+
+static int hda_codec_pm_resume(struct device *dev)
+{
+	dev->power.power_state = PMSG_RESUME;
+	return hda_codec_force_resume(dev);
+}
+
+static int hda_codec_pm_freeze(struct device *dev)
+{
+	dev->power.power_state = PMSG_FREEZE;
+	return pm_runtime_force_suspend(dev);
+}
+
+static int hda_codec_pm_thaw(struct device *dev)
+{
+	dev->power.power_state = PMSG_THAW;
+	return hda_codec_force_resume(dev);
+}
+
+static int hda_codec_pm_restore(struct device *dev)
+{
+	dev->power.power_state = PMSG_RESTORE;
+	return hda_codec_force_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
 /* referred in hda_bind.c */
 const struct dev_pm_ops hda_codec_driver_pm = {
-	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-				pm_runtime_force_resume)
+#ifdef CONFIG_PM_SLEEP
+	.suspend = hda_codec_pm_suspend,
+	.resume = hda_codec_pm_resume,
+	.freeze = hda_codec_pm_freeze,
+	.thaw = hda_codec_pm_thaw,
+	.poweroff = hda_codec_pm_suspend,
+	.restore = hda_codec_pm_restore,
+#endif /* CONFIG_PM_SLEEP */
 	SET_RUNTIME_PM_OPS(hda_codec_runtime_suspend, hda_codec_runtime_resume,
 			   NULL)
 };
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 76ae627..7a2943a3 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1447,9 +1447,11 @@
 	ret = !repoll || !eld->monitor_present || eld->eld_valid;
 
 	jack = snd_hda_jack_tbl_get(codec, pin_nid);
-	if (jack)
+	if (jack) {
 		jack->block_report = !ret;
-
+		jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
+			AC_PINSENSE_PRESENCE : 0;
+	}
 	mutex_unlock(&per_pin->lock);
 	return ret;
 }
@@ -1554,6 +1556,11 @@
 	container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
 	struct hda_codec *codec = per_pin->codec;
 	struct hdmi_spec *spec = codec->spec;
+	struct hda_jack_tbl *jack;
+
+	jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
+	if (jack)
+		jack->jack_dirty = 1;
 
 	if (per_pin->repoll_count++ > 6)
 		per_pin->repoll_count = 0;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0fc05eb..822650d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -773,11 +773,10 @@
 	if (spec->init_hook)
 		spec->init_hook(codec);
 
+	snd_hda_gen_init(codec);
 	alc_fix_pll(codec);
 	alc_auto_init_amp(codec, spec->init_amp);
 
-	snd_hda_gen_init(codec);
-
 	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
 
 	return 0;
@@ -5855,7 +5854,7 @@
 	SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
-	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 84f86745..828bc61 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -643,6 +643,7 @@
 	.reg_defaults =		cs4270_reg_defaults,
 	.num_reg_defaults =	ARRAY_SIZE(cs4270_reg_defaults),
 	.cache_type =		REGCACHE_RBTREE,
+	.write_flag_mask =	CS4270_I2C_INCR,
 
 	.readable_reg =		cs4270_reg_is_readable,
 	.volatile_reg =		cs4270_reg_is_volatile,
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 584aab8..3e65dc7 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1209,14 +1209,14 @@
 		&max98090_right_rcv_mixer_controls[0],
 		ARRAY_SIZE(max98090_right_rcv_mixer_controls)),
 
-	SND_SOC_DAPM_MUX("LINMOD Mux", M98090_REG_LOUTR_MIXER,
-		M98090_LINMOD_SHIFT, 0, &max98090_linmod_mux),
+	SND_SOC_DAPM_MUX("LINMOD Mux", SND_SOC_NOPM, 0, 0,
+		&max98090_linmod_mux),
 
-	SND_SOC_DAPM_MUX("MIXHPLSEL Mux", M98090_REG_HP_CONTROL,
-		M98090_MIXHPLSEL_SHIFT, 0, &max98090_mixhplsel_mux),
+	SND_SOC_DAPM_MUX("MIXHPLSEL Mux", SND_SOC_NOPM, 0, 0,
+		&max98090_mixhplsel_mux),
 
-	SND_SOC_DAPM_MUX("MIXHPRSEL Mux", M98090_REG_HP_CONTROL,
-		M98090_MIXHPRSEL_SHIFT, 0, &max98090_mixhprsel_mux),
+	SND_SOC_DAPM_MUX("MIXHPRSEL Mux", SND_SOC_NOPM, 0, 0,
+		&max98090_mixhprsel_mux),
 
 	SND_SOC_DAPM_PGA("HP Left Out", M98090_REG_OUTPUT_ENABLE,
 		M98090_HPLEN_SHIFT, 0, NULL, 0),
diff --git a/sound/soc/codecs/nau8810.c b/sound/soc/codecs/nau8810.c
index e455186..2234d0c 100644
--- a/sound/soc/codecs/nau8810.c
+++ b/sound/soc/codecs/nau8810.c
@@ -414,9 +414,9 @@
 	SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3,
 		NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0],
 		ARRAY_SIZE(nau8810_mono_mixer_controls)),
-	SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3,
+	SND_SOC_DAPM_DAC("DAC", "Playback", NAU8810_REG_POWER3,
 		NAU8810_DAC_EN_SFT, 0),
-	SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2,
+	SND_SOC_DAPM_ADC("ADC", "Capture", NAU8810_REG_POWER2,
 		NAU8810_ADC_EN_SFT, 0),
 	SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3,
 		NAU8810_NSPK_EN_SFT, 0, NULL, 0),
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index 91879ea..01aa75c 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -60,13 +60,15 @@
  * RT5677_SPI_READ/WRITE_32:	Transfer 4 bytes
  * RT5677_SPI_READ/WRITE_BURST:	Transfer any multiples of 8 bytes
  *
- * For example, reading 260 bytes at 0x60030002 uses the following commands:
- * 0x60030002 RT5677_SPI_READ_16	2 bytes
+ * Note:
+ * 16-bit writes and reads are restricted to the address range
+ * 0x18020000 ~ 0x18021000
+ *
+ * For example, reading 256 bytes at 0x60030004 uses the following commands:
  * 0x60030004 RT5677_SPI_READ_32	4 bytes
  * 0x60030008 RT5677_SPI_READ_BURST	240 bytes
  * 0x600300F8 RT5677_SPI_READ_BURST	8 bytes
  * 0x60030100 RT5677_SPI_READ_32	4 bytes
- * 0x60030104 RT5677_SPI_READ_16	2 bytes
  *
  * Input:
  * @read: true for read commands; false for write commands
@@ -81,15 +83,13 @@
 {
 	u8 cmd;
 
-	if (align == 2 || align == 6 || remain == 2) {
-		cmd = RT5677_SPI_READ_16;
-		*len = 2;
-	} else if (align == 4 || remain <= 6) {
+	if (align == 4 || remain <= 4) {
 		cmd = RT5677_SPI_READ_32;
 		*len = 4;
 	} else {
 		cmd = RT5677_SPI_READ_BURST;
-		*len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN);
+		*len = (((remain - 1) >> 3) + 1) << 3;
+		*len = min_t(u32, *len, RT5677_SPI_BURST_LEN);
 	}
 	return read ? cmd : cmd + 1;
 }
@@ -110,7 +110,7 @@
 	}
 }
 
-/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */
+/* Read DSP address space using SPI. addr and len have to be 4-byte aligned. */
 int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
 {
 	u32 offset;
@@ -126,7 +126,7 @@
 	if (!g_spi)
 		return -ENODEV;
 
-	if ((addr & 1) || (len & 1)) {
+	if ((addr & 3) || (len & 3)) {
 		dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len);
 		return -EACCES;
 	}
@@ -161,13 +161,13 @@
 }
 EXPORT_SYMBOL_GPL(rt5677_spi_read);
 
-/* Write DSP address space using SPI. addr has to be 2-byte aligned.
- * If len is not 2-byte aligned, an extra byte of zero is written at the end
+/* Write DSP address space using SPI. addr has to be 4-byte aligned.
+ * If len is not 4-byte aligned, then extra zeros are written at the end
  * as padding.
  */
 int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
 {
-	u32 offset, len_with_pad = len;
+	u32 offset;
 	int status = 0;
 	struct spi_transfer t;
 	struct spi_message m;
@@ -180,22 +180,19 @@
 	if (!g_spi)
 		return -ENODEV;
 
-	if (addr & 1) {
+	if (addr & 3) {
 		dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len);
 		return -EACCES;
 	}
 
-	if (len & 1)
-		len_with_pad = len + 1;
-
 	memset(&t, 0, sizeof(t));
 	t.tx_buf = buf;
 	t.speed_hz = RT5677_SPI_FREQ;
 	spi_message_init_with_transfers(&m, &t, 1);
 
-	for (offset = 0; offset < len_with_pad;) {
+	for (offset = 0; offset < len;) {
 		spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7,
-				len_with_pad - offset, &t.len);
+				len - offset, &t.len);
 
 		/* Construct SPI message header */
 		buf[0] = spi_cmd;
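
The new burst-length line rounds the remaining byte count up to the next multiple of 8 before clamping to RT5677_SPI_BURST_LEN: (((remain - 1) >> 3) + 1) << 3 is a shift-based round-up, equivalent to the kernel's round_up(remain, 8). The same computation in a plainer form, as a sketch for reference rather than part of the patch:

	/* Round n up to the next multiple of 8; n must be at least 1. */
	static inline unsigned int round_up_8(unsigned int n)
	{
		return (n + 7) & ~7u;	/* same result as the shift form above */
	}
	/* round_up_8(1) == 8, round_up_8(8) == 8, round_up_8(9) == 16 */
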
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 28fdfc5..c27e347 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -316,6 +316,8 @@
 	SND_SOC_DAPM_INPUT("IN2_R"),
 	SND_SOC_DAPM_INPUT("IN3_L"),
 	SND_SOC_DAPM_INPUT("IN3_R"),
+	SND_SOC_DAPM_INPUT("CM_L"),
+	SND_SOC_DAPM_INPUT("CM_R"),
 };
 
 static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index fe45a16..ad11aa0 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -84,6 +84,7 @@
 	struct list_head list;
 	int master;
 	int gpio_reset;
+	bool reset_inverted;
 	int power;
 #define AIC3X_MODEL_3X 0
 #define AIC3X_MODEL_33 1
@@ -1353,7 +1354,8 @@
 		 * of the supplies was disabled
 		 */
 		if (gpio_is_valid(aic3x->gpio_reset))
-			gpio_set_value(aic3x->gpio_reset, 0);
+			gpio_set_value(aic3x->gpio_reset,
+				       aic3x->reset_inverted);
 		regcache_mark_dirty(aic3x->regmap);
 	}
 
@@ -1375,7 +1377,8 @@
 
 		if (gpio_is_valid(aic3x->gpio_reset)) {
 			udelay(1);
-			gpio_set_value(aic3x->gpio_reset, 1);
+			gpio_set_value(aic3x->gpio_reset,
+				       !aic3x->reset_inverted);
 		}
 
 		/* Sync reg_cache with the hardware */
@@ -1761,6 +1764,9 @@
 		else
 			aic3x->gpio_reset = -1;
 
+		aic3x->reset_inverted =
+			of_property_read_bool(np, "reset-inverted");
+
 		if (of_property_read_u32_array(np, "ai3x-gpio-func",
 					ai3x_setup->gpio_func, 2) >= 0) {
 			aic3x->setup = ai3x_setup;
@@ -1797,7 +1803,8 @@
 		ret = gpio_request(aic3x->gpio_reset, "tlv320aic3x reset");
 		if (ret != 0)
 			goto err;
-		gpio_direction_output(aic3x->gpio_reset, 0);
+		gpio_direction_output(aic3x->gpio_reset,
+				      aic3x->reset_inverted);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
@@ -1843,7 +1850,7 @@
 	snd_soc_unregister_codec(&client->dev);
 	if (gpio_is_valid(aic3x->gpio_reset) &&
 	    !aic3x_is_shared_reset(aic3x)) {
-		gpio_set_value(aic3x->gpio_reset, 0);
+		gpio_set_value(aic3x->gpio_reset, aic3x->reset_inverted);
 		gpio_free(aic3x->gpio_reset);
 	}
 	return 0;
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index dffd549..705d252 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -689,6 +689,7 @@
 asrc_fail:
 	of_node_put(asrc_np);
 	of_node_put(codec_np);
+	put_device(&cpu_pdev->dev);
 fail:
 	of_node_put(cpu_np);
 
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 3ef1745..fa64cc2 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -59,6 +59,8 @@
 	u32 fifo_depth;
 	u32 slot_width;
 	u32 slots;
+	u32 tx_mask;
+	u32 rx_mask;
 	u32 hck_rate[2];
 	u32 sck_rate[2];
 	bool hck_dir[2];
@@ -359,21 +361,13 @@
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
 			   ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
 
-	regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
-			   ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
-	regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
-			   ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
-
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
 			   ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
 
-	regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
-			   ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
-	regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
-			   ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
-
 	esai_priv->slot_width = slot_width;
 	esai_priv->slots = slots;
+	esai_priv->tx_mask = tx_mask;
+	esai_priv->rx_mask = rx_mask;
 
 	return 0;
 }
@@ -396,7 +390,8 @@
 		break;
 	case SND_SOC_DAIFMT_RIGHT_J:
 		/* Data on rising edge of bclk, frame high, right aligned */
-		xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCR_xWA;
+		xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP;
+		xcr  |= ESAI_xCR_xWA;
 		break;
 	case SND_SOC_DAIFMT_DSP_A:
 		/* Data on rising edge of bclk, frame high, 1clk before data */
@@ -453,12 +448,12 @@
 		return -EINVAL;
 	}
 
-	mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR;
+	mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR | ESAI_xCR_xWA;
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, mask, xcr);
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, mask, xcr);
 
 	mask = ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCCR_xFSP |
-		ESAI_xCCR_xFSD | ESAI_xCCR_xCKD | ESAI_xCR_xWA;
+		ESAI_xCCR_xFSD | ESAI_xCCR_xCKD;
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, mask, xccr);
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, mask, xccr);
 
@@ -593,6 +588,7 @@
 	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
 	u8 i, channels = substream->runtime->channels;
 	u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
+	u32 mask;
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
@@ -605,15 +601,38 @@
 		for (i = 0; tx && i < channels; i++)
 			regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
 
+		/*
+		 * When TE/RE is set at the end of the enablement flow,
+		 * channels can be swapped in the multi data line case.
+		 * To work around this issue, switch to the following bit
+		 * enablement sequence:
+		 * 1) clear xSMB & xSMA: done in probe and in the
+		 *                       stop/suspend trigger.
+		 * 2) set TE/RE
+		 * 3) set xSMB
+		 * 4) set xSMA: xSMA is set last, which triggers the
+		 *              ESAI to start.
+		 */
 		regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
 				   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
 				   tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
+		mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
+
+		regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
+				   ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
+		regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
+				   ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
+
 		break;
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
 				   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
+		regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
+				   ESAI_xSMA_xS_MASK, 0);
+		regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
+				   ESAI_xSMB_xS_MASK, 0);
 
 		/* Disable and reset FIFO */
 		regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
@@ -903,6 +922,15 @@
 		return ret;
 	}
 
+	esai_priv->tx_mask = 0xFFFFFFFF;
+	esai_priv->rx_mask = 0xFFFFFFFF;
+
+	/* Clear the TSMA, TSMB, RSMA, RSMB */
+	regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
+	regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
+	regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
+	regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
+
 	ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
 					      &fsl_esai_dai, 1);
 	if (ret) {
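The fsl_esai change defers the slot-mask writes from set_tdm_slot time to the trigger, so that xSMA is the very last enable bit written. A userspace model of that ordering, with invented register indices and assuming xSMA/xSMB carry the lower/upper halves of the 32-slot mask as in the driver, purely to make the commented sequence explicit:

#include <stdint.h>
#include <stdio.h>

enum { xCR, xSMB, xSMA, NREGS };	/* stand-ins for the ESAI registers */

static uint32_t regs[NREGS];

/* Mirror of the documented order: TE/RE first, xSMB second, and xSMA
 * last, since writing xSMA is what actually starts the ESAI and keeps
 * all data lines aligned on the same slot. */
static void trigger_start(uint32_t pins, uint32_t slot_mask)
{
	regs[xCR]  = pins;			/* 1) set TE/RE */
	regs[xSMB] = slot_mask >> 16;		/* 2) upper mask half */
	regs[xSMA] = slot_mask & 0xffff;	/* 3) lower half: go */
}

int main(void)
{
	trigger_start(2, 0x0003);
	printf("xCR=%u xSMB=%u xSMA=%u\n", (unsigned)regs[xCR],
	       (unsigned)regs[xSMB], (unsigned)regs[xSMA]);
	return 0;
}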
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index b99e0b5..8e525f7 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -115,6 +115,7 @@
 		ret = -EPROBE_DEFER;
 		goto fail;
 	}
+	put_device(&ssi_pdev->dev);
 	codec_dev = of_find_i2c_device_by_node(codec_np);
 	if (!codec_dev) {
 		dev_err(&pdev->dev, "failed to find codec platform device\n");
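Both the fsl-asoc-card and imx-sgtl5000 hunks fix the same class of leak: of_find_device_by_node() hands back a reference-counted device that must be released on every path. A minimal sketch of the pairing (function name hypothetical):

#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* of_find_device_by_node() takes a reference on the device it returns,
 * so the lookup must be balanced by put_device() once the caller is
 * done with it, including on error paths. */
static int example_lookup_cpu_dev(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);

	if (!pdev)
		return -EPROBE_DEFER;

	/* ... copy whatever is needed out of pdev ... */

	put_device(&pdev->dev);
	return 0;
}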
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index 79a9fdf..582b30a 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -1252,11 +1252,15 @@
 		goto irq_err;
 
 	err = sst_dma_new(sst);
-	if (err)
-		dev_warn(dev, "sst_dma_new failed %d\n", err);
+	if (err) {
+		dev_err(dev, "sst_dma_new failed %d\n", err);
+		goto dma_err;
+	}
 
 	return sst;
 
+dma_err:
+	free_irq(sst->irq, sst);
 irq_err:
 	if (sst->ops->free)
 		sst->ops->free(sst);
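The sst-firmware fix also illustrates the usual goto-unwind convention: the new dma_err label releases the IRQ that was acquired just before sst_dma_new() failed. A userspace model of the ladder, with malloc() standing in for the real resources:

#include <stdlib.h>

static int bring_up(void)
{
	char *irq = malloc(16);		/* stands in for the IRQ above */
	char *dma;

	if (!irq)
		goto irq_err;

	dma = malloc(16);		/* stands in for sst_dma_new() */
	if (!dma)
		goto dma_err;

	free(dma);
	free(irq);
	return 0;

dma_err:
	free(irq);	/* the added label: undo the IRQ acquisition */
irq_err:
	return -1;
}

int main(void)
{
	return bring_up() ? 1 : 0;
}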
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 7fb237b..2553d9c 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -966,10 +966,13 @@
 		codec_params = *params;
 
 		/* fixup params based on TDM slot masks */
-		if (codec_dai->tx_mask)
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+		    codec_dai->tx_mask)
 			soc_pcm_codec_params_fixup(&codec_params,
 						   codec_dai->tx_mask);
-		if (codec_dai->rx_mask)
+
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+		    codec_dai->rx_mask)
 			soc_pcm_codec_params_fixup(&codec_params,
 						   codec_dai->rx_mask);
 
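The soc-pcm fix applies a TDM mask only to the stream direction it belongs to. Assuming the fixup clamps the channel count to the number of set slot bits (which is what soc_pcm_codec_params_fixup() amounts to), a userspace model of the selection:

#include <stdio.h>

static unsigned int tdm_channels(unsigned int mask)
{
	return (unsigned int)__builtin_popcount(mask);	/* set slot bits */
}

int main(void)
{
	unsigned int tx_mask = 0x3, rx_mask = 0xf;
	int playback = 1;
	/* Before the fix both masks were applied regardless of direction;
	 * now playback uses tx_mask and capture uses rx_mask. */
	unsigned int mask = playback ? tx_mask : rx_mask;

	printf("fixup channels = %u\n", tdm_channels(mask));
	return 0;
}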
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index d6b48c7..086fe4d 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1989,6 +1989,7 @@
 	struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id)
 {
 	struct soc_tplg tplg;
+	int ret;
 
 	/* setup parsing context */
 	memset(&tplg, 0, sizeof(tplg));
@@ -2002,7 +2003,12 @@
 	tplg.bytes_ext_ops = ops->bytes_ext_ops;
 	tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
 
-	return soc_tplg_load(&tplg);
+	ret = soc_tplg_load(&tplg);
+	/* free the created components if fail to load topology */
+	if (ret)
+		snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
 
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 58d6249..0918924 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -337,12 +337,16 @@
 {
 	struct usb_device *usbdev = line6->usbdev;
 	int ret;
-	unsigned char len;
+	unsigned char *len;
 	unsigned count;
 
 	if (address > 0xffff || datalen > 0xff)
 		return -EINVAL;
 
+	len = kmalloc(sizeof(*len), GFP_KERNEL);
+	if (!len)
+		return -ENOMEM;
+
 	/* query the serial number: */
 	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
 			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -351,7 +355,7 @@
 
 	if (ret < 0) {
 		dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
-		return ret;
+		goto exit;
 	}
 
 	/* Wait for data length. We'll get 0xff until length arrives. */
@@ -361,28 +365,29 @@
 		ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
 				      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
 				      USB_DIR_IN,
-				      0x0012, 0x0000, &len, 1,
+				      0x0012, 0x0000, len, 1,
 				      LINE6_TIMEOUT * HZ);
 		if (ret < 0) {
 			dev_err(line6->ifcdev,
 				"receive length failed (error %d)\n", ret);
-			return ret;
+			goto exit;
 		}
 
-		if (len != 0xff)
+		if (*len != 0xff)
 			break;
 	}
 
-	if (len == 0xff) {
+	ret = -EIO;
+	if (*len == 0xff) {
 		dev_err(line6->ifcdev, "read failed after %d retries\n",
 			count);
-		return -EIO;
-	} else if (len != datalen) {
+		goto exit;
+	} else if (*len != datalen) {
 		/* should be equal or something went wrong */
 		dev_err(line6->ifcdev,
 			"length mismatch (expected %d, got %d)\n",
-			(int)datalen, (int)len);
-		return -EIO;
+			(int)datalen, (int)*len);
+		goto exit;
 	}
 
 	/* receive the result: */
@@ -391,12 +396,12 @@
 			      0x0013, 0x0000, data, datalen,
 			      LINE6_TIMEOUT * HZ);
 
-	if (ret < 0) {
+	if (ret < 0)
 		dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
-		return ret;
-	}
 
-	return 0;
+exit:
+	kfree(len);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(line6_read_data);
 
@@ -408,12 +413,16 @@
 {
 	struct usb_device *usbdev = line6->usbdev;
 	int ret;
-	unsigned char status;
+	unsigned char *status;
 	int count;
 
 	if (address > 0xffff || datalen > 0xffff)
 		return -EINVAL;
 
+	status = kmalloc(sizeof(*status), GFP_KERNEL);
+	if (!status)
+		return -ENOMEM;
+
 	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
 			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
 			      0x0022, address, data, datalen,
@@ -422,7 +431,7 @@
 	if (ret < 0) {
 		dev_err(line6->ifcdev,
 			"write request failed (error %d)\n", ret);
-		return ret;
+		goto exit;
 	}
 
 	for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
@@ -433,28 +442,29 @@
 				      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
 				      USB_DIR_IN,
 				      0x0012, 0x0000,
-				      &status, 1, LINE6_TIMEOUT * HZ);
+				      status, 1, LINE6_TIMEOUT * HZ);
 
 		if (ret < 0) {
 			dev_err(line6->ifcdev,
 				"receiving status failed (error %d)\n", ret);
-			return ret;
+			goto exit;
 		}
 
-		if (status != 0xff)
+		if (*status != 0xff)
 			break;
 	}
 
-	if (status == 0xff) {
+	if (*status == 0xff) {
 		dev_err(line6->ifcdev, "write failed after %d retries\n",
 			count);
-		return -EIO;
-	} else if (status != 0) {
+		ret = -EIO;
+	} else if (*status != 0) {
 		dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
-		return -EIO;
+		ret = -EIO;
 	}
-
-	return 0;
+exit:
+	kfree(status);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(line6_write_data);
 
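The line6 conversion from stack variables to kmalloc() follows from a USB core rule: transfer buffers handed to usb_control_msg() may be DMA-mapped, and DMA to stack memory is not allowed. A hedged sketch of the pattern (function name and the 1s timeout are illustrative; the request/index values are the ones used above):

#include <linux/slab.h>
#include <linux/usb.h>

static int read_one_status_byte(struct usb_device *usbdev, u8 *out)
{
	u8 *buf = kmalloc(1, GFP_KERNEL);	/* heap, never the stack */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
			      0x0012, 0x0000, buf, 1, 1000);
	if (ret >= 0)
		*out = *buf;

	kfree(buf);
	return ret < 0 ? ret : 0;
}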
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 5ab9e0c..c0b6733 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -221,28 +221,32 @@
 static int podhd_dev_start(struct usb_line6_podhd *pod)
 {
 	int ret;
-	u8 init_bytes[8];
+	u8 *init_bytes;
 	int i;
 	struct usb_device *usbdev = pod->line6.usbdev;
 
+	init_bytes = kmalloc(8, GFP_KERNEL);
+	if (!init_bytes)
+		return -ENOMEM;
+
 	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
 					0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
 					0x11, 0,
 					NULL, 0, LINE6_TIMEOUT * HZ);
 	if (ret < 0) {
 		dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret);
-		return ret;
+		goto exit;
 	}
 
 	/* NOTE: looks like some kind of ping message */
 	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
 					USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
 					0x11, 0x0,
-					&init_bytes, 3, LINE6_TIMEOUT * HZ);
+					init_bytes, 3, LINE6_TIMEOUT * HZ);
 	if (ret < 0) {
 		dev_err(pod->line6.ifcdev,
 			"receive length failed (error %d)\n", ret);
-		return ret;
+		goto exit;
 	}
 
 	pod->firmware_version =
@@ -251,7 +255,7 @@
 	for (i = 0; i <= 16; i++) {
 		ret = line6_read_data(&pod->line6, 0xf000 + 0x08 * i, init_bytes, 8);
 		if (ret < 0)
-			return ret;
+			goto exit;
 	}
 
 	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
@@ -259,10 +263,9 @@
 					USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT,
 					1, 0,
 					NULL, 0, LINE6_TIMEOUT * HZ);
-	if (ret < 0)
-		return ret;
-
-	return 0;
+exit:
+	kfree(init_bytes);
+	return ret;
 }
 
 static void podhd_startup_workqueue(struct work_struct *work)
diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
index 8e22f43..d3871d9 100644
--- a/sound/usb/line6/toneport.c
+++ b/sound/usb/line6/toneport.c
@@ -365,15 +365,20 @@
 /*
 	Setup Toneport device.
 */
-static void toneport_setup(struct usb_line6_toneport *toneport)
+static int toneport_setup(struct usb_line6_toneport *toneport)
 {
-	int ticks;
+	int *ticks;
 	struct usb_line6 *line6 = &toneport->line6;
 	struct usb_device *usbdev = line6->usbdev;
 
+	ticks = kmalloc(sizeof(*ticks), GFP_KERNEL);
+	if (!ticks)
+		return -ENOMEM;
+
 	/* sync time on device with host: */
-	ticks = (int)get_seconds();
-	line6_write_data(line6, 0x80c6, &ticks, 4);
+	*ticks = (int)get_seconds();
+	line6_write_data(line6, 0x80c6, ticks, 4);
+	kfree(ticks);
 
 	/* enable device: */
 	toneport_send_cmd(usbdev, 0x0301, 0x0000);
@@ -388,6 +393,7 @@
 		toneport_update_led(toneport);
 
 	mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ);
+	return 0;
 }
 
 /*
@@ -451,7 +457,9 @@
 			return err;
 	}
 
-	toneport_setup(toneport);
+	err = toneport_setup(toneport);
+	if (err)
+		return err;
 
 	/* register audio system: */
 	return snd_card_register(line6->card);
@@ -463,7 +471,11 @@
 */
 static int toneport_reset_resume(struct usb_interface *interface)
 {
-	toneport_setup(usb_get_intfdata(interface));
+	int err;
+
+	err = toneport_setup(usb_get_intfdata(interface));
+	if (err)
+		return err;
 	return line6_resume(interface);
 }
 #endif
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index aa5b08c..089e085 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2443,6 +2443,8 @@
 	kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval);
 	if (! kctl) {
 		usb_audio_err(state->chip, "cannot malloc kcontrol\n");
+		for (i = 0; i < desc->bNrInPins; i++)
+			kfree(namelist[i]);
 		kfree(namelist);
 		kfree(cval);
 		return -ENOMEM;
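The mixer fix addresses a leak where only the array was freed, not the strings it points to. A runnable userspace model of the correct teardown order:

#include <stdlib.h>
#include <string.h>

/* Each element of a name list is its own allocation, so the elements
 * must be freed before the array that holds them. */
static void free_namelist(char **namelist, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		free(namelist[i]);
	free(namelist);
}

int main(void)
{
	char **namelist = calloc(3, sizeof(*namelist));
	unsigned int i;

	if (!namelist)
		return 1;
	for (i = 0; i < 3; i++)
		namelist[i] = strdup("input");
	free_namelist(namelist, 3);
	return 0;
}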
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index b5ca536..961e473 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -202,6 +202,8 @@
 	       "SWAP  %15s%15s%15s\n"
 	       "      %15llu%15llu%15llums\n"
 	       "RECLAIM  %12s%15s%15s\n"
+	       "      %15llu%15llu%15llums\n"
+	       "THRASHING%12s%15s%15s\n"
 	       "      %15llu%15llu%15llums\n",
 	       "count", "real total", "virtual total",
 	       "delay total", "delay average",
@@ -221,7 +223,11 @@
 	       "count", "delay total", "delay average",
 	       (unsigned long long)t->freepages_count,
 	       (unsigned long long)t->freepages_delay_total,
-	       average_ms(t->freepages_delay_total, t->freepages_count));
+	       average_ms(t->freepages_delay_total, t->freepages_count),
+	       "count", "delay total", "delay average",
+	       (unsigned long long)t->thrashing_count,
+	       (unsigned long long)t->thrashing_delay_total,
+	       average_ms(t->thrashing_delay_total, t->thrashing_count));
 }
 
 static void task_context_switch_counts(struct taskstats *t)
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 6694753..def6112 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -2204,7 +2204,7 @@
 		return val & 0xffffffff;
 
 	if (strcmp(type, "u64") == 0 ||
-	    strcmp(type, "s64"))
+	    strcmp(type, "s64") == 0)
 		return val;
 
 	if (strcmp(type, "s8") == 0)
@@ -2428,7 +2428,7 @@
 static char *arg_eval (struct print_arg *arg)
 {
 	long long val;
-	static char buf[20];
+	static char buf[24];
 
 	switch (arg->type) {
 	case PRINT_ATOM:
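The event-parse hunks fix a strcmp() call that was missing its == 0 comparison and enlarge a static buffer: the most negative 64-bit value prints as 20 characters plus the NUL, i.e. 21 bytes, one more than buf[20] held. A quick check:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char buf[24];
	/* "-9223372036854775808" is 20 characters; snprintf() returns the
	 * length it needed, so n + 1 is the required buffer size. */
	int n = snprintf(buf, sizeof(buf), "%lld", LLONG_MIN);

	printf("\"%s\" needs %d bytes\n", buf, n + 1);	/* 21 */
	return 0;
}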
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 8ae824d..884d4f1 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -7,11 +7,12 @@
 endif
 
 # always use the host compiler
+HOSTAR	?= ar
 HOSTCC	?= gcc
 HOSTLD	?= ld
+AR	 = $(HOSTAR)
 CC	 = $(HOSTCC)
 LD	 = $(HOSTLD)
-AR	 = ar
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index e128d1c..ae34467 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1779,7 +1779,8 @@
 			return 1;
 		}
 
-		func = insn->func ? insn->func->pfunc : NULL;
+		if (insn->func)
+			func = insn->func->pfunc;
 
 		if (func && insn->ignore) {
 			WARN_FUNC("BUG: why am I validating an ignored function?",
@@ -2132,9 +2133,10 @@
 	elf_close(file->elf);
 }
 
+static struct objtool_file file;
+
 int check(const char *_objname, bool orc)
 {
-	struct objtool_file file;
 	int ret, warnings = 0;
 
 	objname = _objname;
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index cb081ac5..bd359a0 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -112,7 +112,7 @@
 
 	[report]
 		# Defaults
-		sort-order = comm,dso,symbol
+		sort_order = comm,dso,symbol
 		percent-limit = 0
 		queue-size = 0
 		children = true
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index ee9565a..e58be7e 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -35,6 +35,10 @@
 #include <numa.h>
 #include <numaif.h>
 
+#ifndef RUSAGE_THREAD
+# define RUSAGE_THREAD 1
+#endif
+
 /*
 * Regular printout to the terminal, suppressed if -q is specified:
  */
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index e68c866..cd2900ac 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1323,8 +1323,9 @@
 		goto out_delete_evlist;
 
 	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
-	if (symbol__init(NULL) < 0)
-		return -1;
+	status = symbol__init(NULL);
+	if (status < 0)
+		goto out_delete_evlist;
 
 	sort__setup_elide(stdout);
 
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 66b53f1..b5d0be5 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -42,7 +42,7 @@
 		return -1;
 	}
 
-	if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
+	if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
 		ret = -1;
 
 	if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
@@ -54,7 +54,7 @@
 	if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
 		ret = -1;
 
-	if (perf_evsel__test_field(evsel, "next_comm", 16, true))
+	if (perf_evsel__test_field(evsel, "next_comm", 16, false))
 		ret = -1;
 
 	if (perf_evsel__test_field(evsel, "next_pid", 4, true))
@@ -72,7 +72,7 @@
 		return -1;
 	}
 
-	if (perf_evsel__test_field(evsel, "comm", 16, true))
+	if (perf_evsel__test_field(evsel, "comm", 16, false))
 		ret = -1;
 
 	if (perf_evsel__test_field(evsel, "pid", 4, true))
@@ -84,5 +84,6 @@
 	if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
 		ret = -1;
 
+	perf_evsel__delete(evsel);
 	return ret;
 }
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index c8d9592..75d504e 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -38,7 +38,7 @@
 	if (IS_ERR(evsel)) {
 		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
 		pr_debug("%s\n", errbuf);
-		goto out_thread_map_delete;
+		goto out_cpu_map_delete;
 	}
 
 	if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -112,6 +112,8 @@
 	perf_evsel__close_fd(evsel, 1, threads->nr);
 out_evsel_delete:
 	perf_evsel__delete(evsel);
+out_cpu_map_delete:
+	cpu_map__put(cpus);
 out_thread_map_delete:
 	thread_map__put(threads);
 	return err;
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 29d015e..b87221e 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1244,9 +1244,9 @@
 	}
 
 	/* padding must be written by fn() e.g. record__process_auxtrace() */
-	padding = size & 7;
+	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
 	if (padding)
-		padding = 8 - padding;
+		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
 
 	memset(&ev, 0, sizeof(ev));
 	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 26fb1ee..1b6963e 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -37,6 +37,9 @@
 struct auxtrace_info_event;
 struct events_stats;
 
+/* Auxtrace records must have the same alignment as perf event records */
+#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
+
 enum auxtrace_type {
 	PERF_AUXTRACE_UNKNOWN,
 	PERF_AUXTRACE_INTEL_PT,
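The new constant makes the usual power-of-two padding arithmetic explicit: size & (align - 1) is the remainder, and align - remainder is the distance to the next boundary. A runnable check of the math used in auxtrace.c above:

#include <stdio.h>

#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

static size_t auxtrace_padding(size_t size)
{
	size_t padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);

	return padding ? PERF_AUXTRACE_RECORD_ALIGNMENT - padding : 0;
}

int main(void)
{
	size_t sizes[] = { 8, 9, 15, 16 };
	int i;

	for (i = 0; i < 4; i++)
		printf("size %zu -> pad %zu\n", sizes[i], auxtrace_padding(sizes[i]));
	return 0;
}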
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 993ef27..32aab95 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -176,6 +176,7 @@
 	return bf;
 }
 
+/* The caller is responsible to free the returned buffer. */
 char *build_id_cache__origname(const char *sbuild_id)
 {
 	char *linkname;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 18dae74..1d66f8e 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -595,11 +595,10 @@
 	}
 
 	ret = set_value(item, value);
-	return ret;
 
 out_free:
 	free(key);
-	return -1;
+	return ret;
 }
 
 static int perf_config_set__init(struct perf_config_set *set)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f7128c2..a62f795 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1167,6 +1167,7 @@
 {
 	assert(list_empty(&evsel->node));
 	assert(evsel->evlist == NULL);
+	perf_evsel__free_counts(evsel);
 	perf_evsel__free_fd(evsel);
 	perf_evsel__free_id(evsel);
 	perf_evsel__free_config_terms(evsel);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index ad613ea..82833ce 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1027,8 +1027,10 @@
 
 	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
 					iter->evsel, al, max_stack_depth);
-	if (err)
+	if (err) {
+		map__put(alm);
 		return err;
+	}
 
 	err = iter->ops->prepare_entry(iter, al);
 	if (err)
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index d27715f..63fa3a9 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -26,6 +26,7 @@
 
 #include "../cache.h"
 #include "../util.h"
+#include "../auxtrace.h"
 
 #include "intel-pt-insn-decoder.h"
 #include "intel-pt-pkt-decoder.h"
@@ -57,6 +58,7 @@
 	INTEL_PT_STATE_NO_IP,
 	INTEL_PT_STATE_ERR_RESYNC,
 	INTEL_PT_STATE_IN_SYNC,
+	INTEL_PT_STATE_TNT_CONT,
 	INTEL_PT_STATE_TNT,
 	INTEL_PT_STATE_TIP,
 	INTEL_PT_STATE_TIP_PGD,
@@ -71,8 +73,9 @@
 	case INTEL_PT_STATE_NO_IP:
 	case INTEL_PT_STATE_ERR_RESYNC:
 	case INTEL_PT_STATE_IN_SYNC:
-	case INTEL_PT_STATE_TNT:
+	case INTEL_PT_STATE_TNT_CONT:
 		return true;
+	case INTEL_PT_STATE_TNT:
 	case INTEL_PT_STATE_TIP:
 	case INTEL_PT_STATE_TIP_PGD:
 	case INTEL_PT_STATE_FUP:
@@ -239,19 +242,15 @@
 		if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
 			decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
 						decoder->tsc_ctc_ratio_d;
-
-		/*
-		 * Allow for timestamps appearing to backwards because a TSC
-		 * packet has slipped past a MTC packet, so allow 2 MTC ticks
-		 * or ...
-		 */
-		decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
-					decoder->tsc_ctc_ratio_n,
-					decoder->tsc_ctc_ratio_d);
 	}
-	/* ... or 0x100 paranoia */
-	if (decoder->tsc_slip < 0x100)
-		decoder->tsc_slip = 0x100;
+
+	/*
+	 * A TSC packet can slip past MTC packets so that the timestamp appears
+	 * to go backwards. One estimate is that it can be up to about 40 CPU
+	 * cycles, which is certainly less than 0x1000 TSC ticks, but accept
+	 * slippage an order of magnitude more to be on the safe side.
+	 */
+	decoder->tsc_slip = 0x10000;
 
 	intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
 	intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
@@ -859,16 +858,20 @@
 	timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
 	masked_timestamp = timestamp & decoder->period_mask;
 	if (decoder->continuous_period) {
-		if (masked_timestamp != decoder->last_masked_timestamp)
+		if (masked_timestamp > decoder->last_masked_timestamp)
 			return 1;
 	} else {
 		timestamp += 1;
 		masked_timestamp = timestamp & decoder->period_mask;
-		if (masked_timestamp != decoder->last_masked_timestamp) {
+		if (masked_timestamp > decoder->last_masked_timestamp) {
 			decoder->last_masked_timestamp = masked_timestamp;
 			decoder->continuous_period = true;
 		}
 	}
+
+	if (masked_timestamp < decoder->last_masked_timestamp)
+		return decoder->period_ticks;
+
 	return decoder->period_ticks - (timestamp - masked_timestamp);
 }
 
@@ -897,7 +900,10 @@
 	case INTEL_PT_PERIOD_TICKS:
 		timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
 		masked_timestamp = timestamp & decoder->period_mask;
-		decoder->last_masked_timestamp = masked_timestamp;
+		if (masked_timestamp > decoder->last_masked_timestamp)
+			decoder->last_masked_timestamp = masked_timestamp;
+		else
+			decoder->last_masked_timestamp += decoder->period_ticks;
 		break;
 	case INTEL_PT_PERIOD_NONE:
 	case INTEL_PT_PERIOD_MTC:
@@ -1174,7 +1180,9 @@
 				return -ENOENT;
 			}
 			decoder->tnt.count -= 1;
-			if (!decoder->tnt.count)
+			if (decoder->tnt.count)
+				decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
+			else
 				decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
 			decoder->tnt.payload <<= 1;
 			decoder->state.from_ip = decoder->ip;
@@ -1205,7 +1213,9 @@
 
 		if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
 			decoder->tnt.count -= 1;
-			if (!decoder->tnt.count)
+			if (decoder->tnt.count)
+				decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
+			else
 				decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
 			if (decoder->tnt.payload & BIT63) {
 				decoder->tnt.payload <<= 1;
@@ -1225,8 +1235,11 @@
 				return 0;
 			}
 			decoder->ip += intel_pt_insn.length;
-			if (!decoder->tnt.count)
+			if (!decoder->tnt.count) {
+				decoder->sample_timestamp = decoder->timestamp;
+				decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
 				return -EAGAIN;
+			}
 			decoder->tnt.payload <<= 1;
 			continue;
 		}
@@ -1311,7 +1324,6 @@
 {
 	intel_pt_log("ERROR: Buffer overflow\n");
 	intel_pt_clear_tx_flags(decoder);
-	decoder->cbr = 0;
 	decoder->timestamp_insn_cnt = 0;
 	decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
 	decoder->overflow = true;
@@ -2150,6 +2162,7 @@
 			err = intel_pt_walk_trace(decoder);
 			break;
 		case INTEL_PT_STATE_TNT:
+		case INTEL_PT_STATE_TNT_CONT:
 			err = intel_pt_walk_tnt(decoder);
 			if (err == -EAGAIN)
 				err = intel_pt_walk_trace(decoder);
@@ -2351,6 +2364,34 @@
 	}
 }
 
+#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
+
+/**
+ * adj_for_padding - adjust overlap to account for padding.
+ * @buf_b: second buffer
+ * @buf_a: first buffer
+ * @len_a: size of first buffer
+ *
+ * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
+ * accordingly.
+ *
+ * Return: A pointer into @buf_b from where non-overlapped data starts
+ */
+static unsigned char *adj_for_padding(unsigned char *buf_b,
+				      unsigned char *buf_a, size_t len_a)
+{
+	unsigned char *p = buf_b - MAX_PADDING;
+	unsigned char *q = buf_a + len_a - MAX_PADDING;
+	int i;
+
+	for (i = MAX_PADDING; i; i--, p++, q++) {
+		if (*p != *q)
+			break;
+	}
+
+	return p;
+}
+
 /**
  * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
  *                             using TSC.
@@ -2401,8 +2442,11 @@
 
 			/* Same TSC, so buffers are consecutive */
 			if (!cmp && rem_b >= rem_a) {
+				unsigned char *start;
+
 				*consecutive = true;
-				return buf_b + len_b - (rem_b - rem_a);
+				start = buf_b + len_b - (rem_b - rem_a);
+				return adj_for_padding(start, buf_a, len_a);
 			}
 			if (cmp < 0)
 				return buf_b; /* tsc_a < tsc_b => no overlap */
@@ -2465,7 +2509,7 @@
 		found = memmem(buf_a, len_a, buf_b, len_a);
 		if (found) {
 			*consecutive = true;
-			return buf_b + len_a;
+			return adj_for_padding(buf_b + len_a, buf_a, len_a);
 		}
 
 		/* Try again at next PSB in buffer 'a' */
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index d40ab4c..24c6621 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -2259,6 +2259,8 @@
 	}
 
 	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
+	if (pt->timeless_decoding && !pt->tc.time_mult)
+		pt->tc.time_mult = 1;
 	pt->have_tsc = intel_pt_have_tsc(pt);
 	pt->sampling_mode = false;
 	pt->est_tsc = !pt->timeless_decoding;
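The time_mult guard matters because the perf TSC conversion divides by it, and a timeless trace can leave the conversion record zeroed. A simplified model (the real conversion in tools/perf/util/tsc.c also handles the shift remainder):

#include <stdint.h>
#include <stdio.h>

static uint64_t time_to_tsc(uint64_t ns, uint64_t time_zero,
			    uint32_t time_mult, uint32_t time_shift)
{
	uint64_t t = ns - time_zero;

	return (t / time_mult) << time_shift;	/* traps if time_mult == 0 */
}

int main(void)
{
	uint32_t time_mult = 0;		/* timeless trace: nothing filled in */

	if (!time_mult)			/* the guard added above */
		time_mult = 1;
	printf("%llu\n", (unsigned long long)time_to_tsc(1000, 0, time_mult, 0));
	return 0;
}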
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 14f111a..6193be6 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2104,6 +2104,7 @@
 				printf("  %-50s [%s]\n", buf, "SDT event");
 				free(buf);
 			}
+			free(path);
 		} else
 			printf("  %-50s [%s]\n", nd->s, "SDT event");
 		if (nd2) {
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 8561e7d..92be948 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -8,7 +8,7 @@
 endif
 
 turbostat : turbostat.c
-CFLAGS +=	-Wall
+CFLAGS +=	-Wall -I../../../include
 CFLAGS +=	-DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
 
 %: %.c
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 5ec2de8..b4c5d96 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -3691,6 +3691,9 @@
 		signal(SIGQUIT, SIG_IGN);
 		if (waitpid(child_pid, &status, 0) == -1)
 			err(status, "waitpid");
+
+		if (WIFEXITED(status))
+			status = WEXITSTATUS(status);
 	}
 	/*
 	 * n.b. fork_it() does not check for errors from for_all_cpus()
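The turbostat change decodes the child's exit code before reusing it: the raw waitpid() status is an encoded word, not an exit code. A minimal runnable demonstration:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)
		_exit(3);		/* child exits with code 3 */
	if (waitpid(pid, &status, 0) == -1)
		return 1;
	if (WIFEXITED(status))
		status = WEXITSTATUS(status);	/* e.g. 0x0300 -> 3 */
	printf("child exit status: %d\n", status);
	return status;
}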
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
index 16058bb..c195b44 100755
--- a/tools/testing/selftests/net/run_netsocktests
+++ b/tools/testing/selftests/net/run_netsocktests
@@ -6,7 +6,7 @@
 ./socket
 if [ $? -ne 0 ]; then
 	echo "[FAIL]"
+	exit 1
 else
 	echo "[PASS]"
 fi
-
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index c9ff2b4..a37cb11 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for netfilter selftests
 
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh conntrack_icmp_related.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
new file mode 100755
index 0000000..b48e183
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
@@ -0,0 +1,282 @@
+#!/bin/bash
+#
+# check that ICMP df-needed/pkttoobig errors are set as RELATED
+# state
+#
+# Setup is:
+#
+# nsclient1 -> nsrouter1 -> nsrouter2 -> nsclient2
+# MTU 1500, except for nsrouter2 <-> nsclient2 link (1280).
+# ping nsclient2 from nsclient1, checking that conntrack sets RELATED
+# state for the 'fragmentation needed' icmp packet.
+#
+# In addition, nsrouter1 performs IP masquerading, i.e. also
+# check that the icmp errors are propagated to the correct host as per
+# the nat of the "established" icmp-echo "connection".
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without nft tool"
+	exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without ip tool"
+	exit $ksft_skip
+fi
+
+cleanup() {
+	for i in 1 2;do ip netns del nsclient$i;done
+	for i in 1 2;do ip netns del nsrouter$i;done
+}
+
+ipv4() {
+    echo -n 192.168.$1.2
+}
+
+ipv6() {
+    echo -n dead:$1::2
+}
+
+check_counter()
+{
+	ns=$1
+	name=$2
+	expect=$3
+	local lret=0
+
+	ip netns exec $ns nft list counter inet filter "$name" | grep -q "$expect"
+	if [ $? -ne 0 ]; then
+		echo "ERROR: counter $name in $ns has unexpected value (expected $expect)" 1>&2
+		ip netns exec $ns nft list counter inet filter "$name" 1>&2
+		lret=1
+	fi
+
+	return $lret
+}
+
+check_unknown()
+{
+	expect="packets 0 bytes 0"
+	for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+		check_counter $n "unknown" "$expect"
+		if [ $? -ne 0 ] ;then
+			return 1
+		fi
+	done
+
+	return 0
+}
+
+for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+  ip netns add $n
+  ip -net $n link set lo up
+done
+
+DEV=veth0
+ip link add $DEV netns nsclient1 type veth peer name eth1 netns nsrouter1
+DEV=veth0
+ip link add $DEV netns nsclient2 type veth peer name eth1 netns nsrouter2
+
+DEV=veth0
+ip link add $DEV netns nsrouter1 type veth peer name eth2 netns nsrouter2
+
+DEV=veth0
+for i in 1 2; do
+    ip -net nsclient$i link set $DEV up
+    ip -net nsclient$i addr add $(ipv4 $i)/24 dev $DEV
+    ip -net nsclient$i addr add $(ipv6 $i)/64 dev $DEV
+done
+
+ip -net nsrouter1 link set eth1 up
+ip -net nsrouter1 link set veth0 up
+
+ip -net nsrouter2 link set eth1 up
+ip -net nsrouter2 link set eth2 up
+
+ip -net nsclient1 route add default via 192.168.1.1
+ip -net nsclient1 -6 route add default via dead:1::1
+
+ip -net nsclient2 route add default via 192.168.2.1
+ip -net nsclient2 -6 route add default via dead:2::1
+
+ip -net nsrouter1 addr add 192.168.1.1/24 dev eth1
+ip -net nsrouter1 addr add 192.168.3.1/24 dev veth0
+ip -net nsrouter1 addr add dead:1::1/64 dev eth1
+ip -net nsrouter1 addr add dead:3::1/64 dev veth0
+ip -net nsrouter1 route add default via 192.168.3.10
+ip -net nsrouter1 -6 route add default via dead:3::10
+
+ip -net nsrouter2 addr add 192.168.2.1/24 dev eth1
+ip -net nsrouter2 addr add 192.168.3.10/24 dev eth2
+ip -net nsrouter2 addr add dead:2::1/64 dev eth1
+ip -net nsrouter2 addr add dead:3::10/64 dev eth2
+ip -net nsrouter2 route add default via 192.168.3.1
+ip -net nsrouter2 -6 route add default via dead:3::1
+
+sleep 2
+for i in 4 6; do
+	ip netns exec nsrouter1 sysctl -q net.ipv$i.conf.all.forwarding=1
+	ip netns exec nsrouter2 sysctl -q net.ipv$i.conf.all.forwarding=1
+done
+
+for netns in nsrouter1 nsrouter2; do
+ip netns exec $netns nft -f - <<EOF
+table inet filter {
+	counter unknown { }
+	counter related { }
+	chain forward {
+		type filter hook forward priority 0; policy accept;
+		meta l4proto icmpv6 icmpv6 type "packet-too-big" ct state "related" counter name "related" accept
+		meta l4proto icmp icmp type "destination-unreachable" ct state "related" counter name "related" accept
+		meta l4proto { icmp, icmpv6 } ct state new,established accept
+		counter name "unknown" drop
+	}
+}
+EOF
+done
+
+ip netns exec nsclient1 nft -f - <<EOF
+table inet filter {
+	counter unknown { }
+	counter related { }
+	chain input {
+		type filter hook input priority 0; policy accept;
+		meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+		meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
+		counter name "unknown" drop
+	}
+}
+EOF
+
+ip netns exec nsclient2 nft -f - <<EOF
+table inet filter {
+	counter unknown { }
+	counter new { }
+	counter established { }
+
+	chain input {
+		type filter hook input priority 0; policy accept;
+		meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+		meta l4proto { icmp, icmpv6 } ct state "new" counter name "new" accept
+		meta l4proto { icmp, icmpv6 } ct state "established" counter name "established" accept
+		counter name "unknown" drop
+	}
+	chain output {
+		type filter hook output priority 0; policy accept;
+		meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+		meta l4proto { icmp, icmpv6 } ct state "new" counter name "new"
+		meta l4proto { icmp, icmpv6 } ct state "established" counter name "established"
+		counter name "unknown" drop
+	}
+}
+EOF
+
+
+# make sure the NAT core rewrites the address of the icmp error according to
+# the conntrack nat information (the icmp error will be directed at nsrouter1's
+# address, but it needs to be routed to nsclient1's address).
+ip netns exec nsrouter1 nft -f - <<EOF
+table ip nat {
+	chain postrouting {
+		type nat hook postrouting priority 0; policy accept;
+		ip protocol icmp oifname "veth0" counter masquerade
+	}
+}
+table ip6 nat {
+	chain postrouting {
+		type nat hook postrouting priority 0; policy accept;
+		ip6 nexthdr icmpv6 oifname "veth0" counter masquerade
+	}
+}
+EOF
+
+ip netns exec nsrouter2 ip link set eth1  mtu 1280
+ip netns exec nsclient2 ip link set veth0 mtu 1280
+sleep 1
+
+ip netns exec nsclient1 ping -c 1 -s 1000 -q -M do 192.168.2.2 >/dev/null
+if [ $? -ne 0 ]; then
+	echo "ERROR: netns ip routing/connectivity broken" 1>&2
+	cleanup
+	exit 1
+fi
+ip netns exec nsclient1 ping6 -q -c 1 -s 1000 dead:2::2 >/dev/null
+if [ $? -ne 0 ]; then
+	echo "ERROR: netns ipv6 routing/connectivity broken" 1>&2
+	cleanup
+	exit 1
+fi
+
+check_unknown
+if [ $? -ne 0 ]; then
+	ret=1
+fi
+
+expect="packets 0 bytes 0"
+for netns in nsrouter1 nsrouter2 nsclient1;do
+	check_counter "$netns" "related" "$expect"
+	if [ $? -ne 0 ]; then
+		ret=1
+	fi
+done
+
+expect="packets 2 bytes 2076"
+check_counter nsclient2 "new" "$expect"
+if [ $? -ne 0 ]; then
+	ret=1
+fi
+
+ip netns exec nsclient1 ping -q -c 1 -s 1300 -M do 192.168.2.2 > /dev/null
+if [ $? -eq 0 ]; then
+	echo "ERROR: ping should have failed with PMTU too big error" 1>&2
+	ret=1
+fi
+
+# nsrouter2 should have generated the icmp error, so
+# related counter should be 0 (it is in the forward chain).
+expect="packets 0 bytes 0"
+check_counter "nsrouter2" "related" "$expect"
+if [ $? -ne 0 ]; then
+	ret=1
+fi
+
+# but nsrouter1 should have seen it, same for nsclient1.
+expect="packets 1 bytes 576"
+for netns in nsrouter1 nsclient1;do
+	check_counter "$netns" "related" "$expect"
+	if [ $? -ne 0 ]; then
+		ret=1
+	fi
+done
+
+ip netns exec nsclient1 ping6 -c 1 -s 1300 dead:2::2 > /dev/null
+if [ $? -eq 0 ]; then
+	echo "ERROR: ping6 should have failed with PMTU too big error" 1>&2
+	ret=1
+fi
+
+expect="packets 2 bytes 1856"
+for netns in nsrouter1 nsclient1;do
+	check_counter "$netns" "related" "$expect"
+	if [ $? -ne 0 ]; then
+		ret=1
+	fi
+done
+
+if [ $ret -eq 0 ];then
+	echo "PASS: icmp mtu error had RELATED state"
+else
+	echo "ERROR: icmp error RELATED state test has failed"
+fi
+
+cleanup
+exit $ret
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 60de4c3..c72586a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2793,6 +2793,9 @@
 {
 	struct kvm_device *dev = filp->private_data;
 
+	if (dev->kvm->mm != current->mm)
+		return -EIO;
+
 	switch (ioctl) {
 	case KVM_SET_DEVICE_ATTR:
 		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
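The kvm_main.c guard exists because a device fd can be passed to another process; ioctls must only run in the address space the VM was created in. A hedged sketch of the same check in isolation (function name invented):

#include <linux/kvm_host.h>
#include <linux/sched.h>

static long example_device_ioctl(struct kvm_device *dev, unsigned int ioctl,
				 unsigned long arg)
{
	/* Reject callers whose mm is not the VM owner's mm. */
	if (dev->kvm->mm != current->mm)
		return -EIO;

	/* ... dispatch KVM_SET/GET/HAS_DEVICE_ATTR as kvm_device_ioctl() ... */
	return -ENOTTY;
}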