Merge tag 'upstream-4.0-rc5' of git://git.infradead.org/linux-ubifs

Pull UBI fix from Artem Bityutskiy:
 "This fixes a bug introduced during the v4.0 merge window where we
  forgot to put braces where they should be"

* tag 'upstream-4.0-rc5' of git://git.infradead.org/linux-ubifs:
  UBI: fix missing brace control flow
diff --git a/Documentation/CodeOfConflict b/Documentation/CodeOfConflict
new file mode 100644
index 0000000..1684d0b
--- /dev/null
+++ b/Documentation/CodeOfConflict
@@ -0,0 +1,27 @@
+Code of Conflict
+----------------
+
+The Linux kernel development effort is a very personal process compared
+to "traditional" ways of developing software.  Your code and ideas
+behind it will be carefully reviewed, often resulting in critique and
+criticism.  The review will almost always require improvements to the
+code before it can be included in the kernel.  Know that this happens
+because everyone involved wants to see the best possible solution for
+the overall success of Linux.  This development process has been proven
+to create the most robust operating system kernel ever, and we do not
+want to do anything to cause the quality of submission and eventual
+result to ever decrease.
+
+If however, anyone feels personally abused, threatened, or otherwise
+uncomfortable due to this process, that is not acceptable.  If so,
+please contact the Linux Foundation's Technical Advisory Board at
+<tab@lists.linux-foundation.org>, or the individual members, and they
+will work to resolve the issue to the best of their ability.  For more
+information on who is on the Technical Advisory Board and what their
+role is, please see:
+	http://www.linuxfoundation.org/programs/advisory-councils/tab
+
+As a reviewer of code, please strive to keep things civil and focused on
+the technical issues involved.  We are all humans, and frustrations can
+be high on both sides of the process.  Try to keep in mind the immortal
+words of Bill and Ted, "Be excellent to each other."
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
index 2428cc0..f3abca7 100644
--- a/Documentation/DocBook/kgdb.tmpl
+++ b/Documentation/DocBook/kgdb.tmpl
@@ -197,6 +197,7 @@
    may be configured as a kernel built-in or a kernel loadable module.
    You can only make use of <constant>kgdbwait</constant> and early
    debugging if you build kgdboc into the kernel as a built-in.
+   </para>
    <para>Optionally you can elect to activate kms (Kernel Mode
    Setting) integration.  When you use kms with kgdboc and you have a
    video driver that has atomic mode setting hooks, it is possible to
@@ -206,7 +207,6 @@
    crashes or doing analysis of memory with kdb while allowing the
    full graphics console applications to run.
    </para>
-   </para>
    <sect2 id="kgdbocArgs">
    <title>kgdboc arguments</title>
    <para>Usage: <constant>kgdboc=[kms][[,]kbd][[,]serial_device][,baud]</constant></para>
@@ -284,7 +284,6 @@
    </listitem>
    </orderedlist>
    </para>
-   </sect3>
    <para>NOTE: Kgdboc does not support interrupting the target via the
    gdb remote protocol.  You must manually send a sysrq-g unless you
    have a proxy that splits console output to a terminal program.
@@ -305,6 +304,7 @@
     as well as on the initial connect, or to use a debugger proxy that
     allows an unmodified gdb to do the debugging.
    </para>
+   </sect3>
    </sect2>
    </sect1>
    <sect1 id="kgdbwait">
@@ -350,12 +350,12 @@
    </para>
    </listitem>
    </orderedlist>
+  </para>
    <para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
    active system console.  An example of incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
    </para>
    <para>It is possible to use this option with kgdboc on a tty that is not a system console.
    </para>
-  </para>
   </sect1>
    <sect1 id="kgdbreboot">
    <title>Run time parameter: kgdbreboot</title>
diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt
index 71daa35..eb102fb 100644
--- a/Documentation/cgroups/unified-hierarchy.txt
+++ b/Documentation/cgroups/unified-hierarchy.txt
@@ -404,8 +404,8 @@
   be understood as an underflow into the highest possible value, -2 or
   -10M etc. do not work, so it's not consistent.
 
-  memory.low, memory.high, and memory.max will use the string
-  "infinity" to indicate and set the highest possible value.
+  memory.low, memory.high, and memory.max will use the string "max" to
+  indicate and set the highest possible value.
 
 5. Planned Changes
 
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index 4ff8462..0e4f90a 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -73,6 +73,8 @@
 						unsigned long *parent_rate);
 		long		(*determine_rate)(struct clk_hw *hw,
 						unsigned long rate,
+						unsigned long min_rate,
+						unsigned long max_rate,
 						unsigned long *best_parent_rate,
 						struct clk_hw **best_parent_clk);
 		int		(*set_parent)(struct clk_hw *hw, u8 index);
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index c81839b..ad69778 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -51,7 +51,7 @@
     Otherwise #opt_params is the number of following arguments.
 
     Example of optional parameters section:
-        1 allow_discards
+        3 allow_discards same_cpu_crypt submit_from_crypt_cpus
 
 allow_discards
     Block discard requests (a.k.a. TRIM) are passed through the crypt device.
@@ -63,6 +63,19 @@
     used space etc.) if the discarded blocks can be located easily on the
     device later.
 
+same_cpu_crypt
+    Perform encryption using the same cpu that IO was submitted on.
+    The default is to use an unbound workqueue so that encryption work
+    is automatically balanced between available CPUs.
+
+submit_from_crypt_cpus
+    Disable offloading writes to a separate thread after encryption.
+    There are some situations where offloading write bios from the
+    encryption threads to a single thread degrades performance
+    significantly.  The default is to offload write bios to the same
+    thread because it benefits CFQ to have writes submitted using the
+    same context.
+
 Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index f4445e5..1e09703 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -22,6 +22,8 @@
 	- pclkN, clkN: Pairs of parent of input clock and input clock to the
 		devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
 		are supported currently.
+- power-domains: phandle pointing to the parent power domain, for more details
+		 see Documentation/devicetree/bindings/power/power_domain.txt
 
 Node of a device using power domains must have a power-domains property
 defined with a phandle to respective power domain.
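
A minimal sketch of how the new power-domains property chains two Exynos power
domains; the node names, unit addresses and register sizes below are
placeholders, not values taken from this binding:

	parent_pd: power-domain@10023c00 {
		compatible = "samsung,exynos4210-pd";
		reg = <0x10023c00 0x10>;
		#power-domain-cells = <0>;
	};

	child_pd: power-domain@10023c20 {
		compatible = "samsung,exynos4210-pd";
		reg = <0x10023c20 0x10>;
		power-domains = <&parent_pd>;	/* parent power domain */
		#power-domain-cells = <0>;
	};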
diff --git a/Documentation/devicetree/bindings/arm/sti.txt b/Documentation/devicetree/bindings/arm/sti.txt
index d70ec35..8d27f6b 100644
--- a/Documentation/devicetree/bindings/arm/sti.txt
+++ b/Documentation/devicetree/bindings/arm/sti.txt
@@ -13,6 +13,10 @@
 Required root node property:
 compatible = "st,stih407";
 
+Boards with the ST STiH410 SoC shall have the following properties:
+Required root node property:
+compatible = "st,stih410";
+
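As a hedged illustration, the root node of a board based on the STiH410 SoC
would carry this string roughly as follows (the board-level compatible here is
a made-up placeholder):

	/ {
		model = "Example STiH410 board";
		compatible = "example,stih410-board", "st,stih410";
	};
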
 Boards with the ST STiH418 SoC shall have the following properties:
 Required root node property:
 compatible = "st,stih418";
diff --git a/Documentation/devicetree/bindings/clock/exynos7-clock.txt b/Documentation/devicetree/bindings/clock/exynos7-clock.txt
index 6d3d5f8..6bf1e74 100644
--- a/Documentation/devicetree/bindings/clock/exynos7-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos7-clock.txt
@@ -34,6 +34,8 @@
 	- "samsung,exynos7-clock-peris"
 	- "samsung,exynos7-clock-fsys0"
 	- "samsung,exynos7-clock-fsys1"
+	- "samsung,exynos7-clock-mscl"
+	- "samsung,exynos7-clock-aud"
 
  - reg: physical base address of the controller and the length of
 	memory mapped region.
@@ -53,6 +55,7 @@
 	- dout_sclk_bus1_pll
 	- dout_sclk_cc_pll
 	- dout_sclk_mfc_pll
+	- dout_sclk_aud_pll
 
 Input clocks for top1 clock controller:
 	- fin_pll
@@ -76,6 +79,14 @@
 	- sclk_uart1
 	- sclk_uart2
 	- sclk_uart3
+	- sclk_spi0
+	- sclk_spi1
+	- sclk_spi2
+	- sclk_spi3
+	- sclk_spi4
+	- sclk_i2s1
+	- sclk_pcm1
+	- sclk_spdif
 
 Input clocks for peris clock controller:
 	- fin_pll
@@ -91,3 +102,7 @@
 	- dout_aclk_fsys1_200
 	- dout_sclk_mmc0
 	- dout_sclk_mmc1
+
+Input clocks for aud clock controller:
+	- fin_pll
+	- fout_aud_pll
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
index ded5d62..c6620bc 100644
--- a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
+++ b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
@@ -1,4 +1,4 @@
-NVIDIA Tegra124 Clock And Reset Controller
+NVIDIA Tegra124 and Tegra132 Clock And Reset Controller
 
 This binding uses the common clock binding:
 Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -7,14 +7,16 @@
 for muxing and gating Tegra's clocks, and setting their rates.
 
 Required properties :
-- compatible : Should be "nvidia,tegra124-car"
+- compatible : Should be "nvidia,tegra124-car" or "nvidia,tegra132-car"
 - reg : Should contain CAR registers location and length
 - clocks : Should contain phandle and clock specifiers for two clocks:
   the 32 KHz "32k_in", and the board-specific oscillator "osc".
 - #clock-cells : Should be 1.
   In clock consumers, this cell represents the clock ID exposed by the
-  CAR. The assignments may be found in header file
-  <dt-bindings/clock/tegra124-car.h>.
+  CAR. The assignments may be found in the header files
+  <dt-bindings/clock/tegra124-car-common.h> (which covers IDs common
+  to Tegra124 and Tegra132) and <dt-bindings/clock/tegra124-car.h>
+  (for Tegra124-specific clocks).
 - #reset-cells : Should be 1.
   In clock consumers, this cell represents the bit number in the CAR's
   array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
diff --git a/Documentation/devicetree/bindings/clock/qcom,lcc.txt b/Documentation/devicetree/bindings/clock/qcom,lcc.txt
new file mode 100644
index 0000000..dd755be
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,lcc.txt
@@ -0,0 +1,21 @@
+Qualcomm LPASS Clock & Reset Controller Binding
+------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+
+			"qcom,lcc-msm8960"
+			"qcom,lcc-apq8064"
+			"qcom,lcc-ipq8064"
+
+- reg : shall contain base register location and length
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Example:
+	clock-controller@28000000 {
+		compatible = "qcom,lcc-ipq8064";
+		reg = <0x28000000 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index 266ff9d..df4a259 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -1,6 +1,6 @@
-* Clock Block on Freescale CoreNet Platforms
+* Clock Block on Freescale QorIQ Platforms
 
-Freescale CoreNet chips take primary clocking input from the external
+Freescale QorIQ chips take primary clocking input from the external
 SYSCLK signal. The SYSCLK input (frequency) is multiplied using
 multiple phase locked loops (PLL) to create a variety of frequencies
 which can then be passed to a variety of internal logic, including
@@ -29,6 +29,7 @@
 	* "fsl,t4240-clockgen"
 	* "fsl,b4420-clockgen"
 	* "fsl,b4860-clockgen"
+	* "fsl,ls1021a-clockgen"
 	Chassis clock strings include:
 	* "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
 	* "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
index 2e18676..0a80fa7 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
@@ -11,6 +11,7 @@
 
   - compatible: Must be one of the following
     - "renesas,r7s72100-mstp-clocks" for R7S72100 (RZ) MSTP gate clocks
+    - "renesas,r8a73a4-mstp-clocks" for R8A73A4 (R-Mobile APE6) MSTP gate clocks
     - "renesas,r8a7740-mstp-clocks" for R8A7740 (R-Mobile A1) MSTP gate clocks
     - "renesas,r8a7779-mstp-clocks" for R8A7779 (R-Car H1) MSTP gate clocks
     - "renesas,r8a7790-mstp-clocks" for R8A7790 (R-Car H2) MSTP gate clocks
diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt
new file mode 100644
index 0000000..ece9239
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt
@@ -0,0 +1,33 @@
+* Renesas R8A73A4 Clock Pulse Generator (CPG)
+
+The CPG generates core clocks for the R8A73A4 SoC. It includes five PLLs
+and several fixed ratio dividers.
+
+Required Properties:
+
+  - compatible: Must be "renesas,r8a73a4-cpg-clocks"
+
+  - reg: Base address and length of the memory resource used by the CPG
+
+  - clocks: Reference to the parent clocks ("extal1" and "extal2")
+
+  - #clock-cells: Must be 1
+
+  - clock-output-names: The names of the clocks. Supported clocks are "main",
+    "pll0", "pll1", "pll2", "pll2s", "pll2h", "z", "z2", "i", "m3", "b",
+    "m1", "m2", "zx", "zs", and "hp".
+
+
+Example
+-------
+
+        cpg_clocks: cpg_clocks@e6150000 {
+                compatible = "renesas,r8a73a4-cpg-clocks";
+                reg = <0 0xe6150000 0 0x10000>;
+                clocks = <&extal1_clk>, <&extal2_clk>;
+                #clock-cells = <1>;
+                clock-output-names = "main", "pll0", "pll1", "pll2",
+                                     "pll2s", "pll2h", "z", "z2",
+                                     "i", "m3", "b", "m1", "m2",
+                                     "zx", "zs", "hp";
+        };
diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
index e6ad35b..b02944f 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
@@ -8,15 +8,18 @@
   - compatible: Must be one of
     - "renesas,r8a7790-cpg-clocks" for the r8a7790 CPG
     - "renesas,r8a7791-cpg-clocks" for the r8a7791 CPG
+    - "renesas,r8a7793-cpg-clocks" for the r8a7793 CPG
     - "renesas,r8a7794-cpg-clocks" for the r8a7794 CPG
     - "renesas,rcar-gen2-cpg-clocks" for the generic R-Car Gen2 CPG
 
   - reg: Base address and length of the memory resource used by the CPG
 
-  - clocks: Reference to the parent clock
+  - clocks: References to the parent clocks: first to the EXTAL clock, second
+    to the USB_EXTAL clock
   - #clock-cells: Must be 1
   - clock-output-names: The names of the clocks. Supported clocks are "main",
-    "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1" and "z"
+    "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", "rcan", and
+    "adsp"
 
 
 Example
@@ -26,8 +29,9 @@
 		compatible = "renesas,r8a7790-cpg-clocks",
 			     "renesas,rcar-gen2-cpg-clocks";
 		reg = <0 0xe6150000 0 0x1000>;
-		clocks = <&extal_clk>;
+		clocks = <&extal_clk &usb_extal_clk>;
 		#clock-cells = <1>;
 		clock-output-names = "main", "pll0, "pll1", "pll3",
-				     "lb", "qspi", "sdh", "sd0", "sd1", "z";
+				     "lb", "qspi", "sdh", "sd0", "sd1", "z",
+				     "rcan", "adsp";
 	};
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
index 67b2b99..60b4428 100644
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -26,7 +26,7 @@
 	"allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s
 	"allwinner,sun7i-a20-ahb-gates-clk" - for the AHB gates on A20
 	"allwinner,sun6i-a31-ar100-clk" - for the AR100 on A31
-	"allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
+	"allwinner,sun6i-a31-ahb1-clk" - for the AHB1 clock on A31
 	"allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
 	"allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23
 	"allwinner,sun9i-a80-ahb0-gates-clk" - for the AHB0 gates on A80
@@ -55,9 +55,11 @@
 	"allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
 	"allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
 	"allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13
-	"allwinner,sun4i-a10-mmc-output-clk" - for the MMC output clock on A10
-	"allwinner,sun4i-a10-mmc-sample-clk" - for the MMC sample clock on A10
+	"allwinner,sun4i-a10-mmc-clk" - for the MMC clock
+	"allwinner,sun9i-a80-mmc-clk" - for mmc module clocks on A80
+	"allwinner,sun9i-a80-mmc-config-clk" - for mmc gates + resets on A80
 	"allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks
+	"allwinner,sun9i-a80-mod0-clk" - for module 0 (storage) clocks on A80
 	"allwinner,sun8i-a23-mbus-clk" - for the MBUS clock on A23
 	"allwinner,sun7i-a20-out-clk" - for the external output clocks
 	"allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31
@@ -73,7 +75,9 @@
 - #clock-cells : from common clock binding; shall be set to 0 except for
 	the following compatibles where it shall be set to 1:
 	"allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk",
-	"allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk"
+	"allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk",
+	"allwinner,*-usb-clk", "allwinner,*-mmc-clk",
+	"allwinner,*-mmc-config-clk"
 - clock-output-names : shall be the corresponding names of the outputs.
 	If the clock module only has one output, the name shall be the
 	module name.
@@ -81,6 +85,10 @@
 And "allwinner,*-usb-clk" clocks also require:
 - reset-cells : shall be set to 1
 
+The "allwinner,sun9i-a80-mmc-config-clk" clock also requires:
+- #reset-cells : shall be set to 1
+- resets : shall be the reset control phandle for the mmc block.
+
 For "allwinner,sun7i-a20-gmac-clk", the parent clocks shall be fixed rate
 dummy clocks at 25 MHz and 125 MHz, respectively. See example.
 
@@ -95,6 +103,14 @@
 is the normal PLL6 output, or "pll6". The second output is rate doubled
 PLL6, or "pll6x2".
 
+The "allwinner,*-mmc-clk" clocks have three different outputs: the
+main clock, with the ID 0, and the output and sample clocks, with the
+IDs 1 and 2, respectively.
+
+The "allwinner,sun9i-a80-mmc-config-clk" clock has one clock/reset output
+per mmc controller. The number of outputs is determined by the size of
+the address block, which is related to the overall mmc block.
+
 For example:
 
 osc24M: clk@01c20050 {
@@ -138,11 +154,11 @@
 };
 
 mmc0_clk: clk@01c20088 {
-	#clock-cells = <0>;
-	compatible = "allwinner,sun4i-mod0-clk";
+	#clock-cells = <1>;
+	compatible = "allwinner,sun4i-a10-mmc-clk";
 	reg = <0x01c20088 0x4>;
 	clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-	clock-output-names = "mmc0";
+	clock-output-names = "mmc0", "mmc0_output", "mmc0_sample";
 };
 
 mii_phy_tx_clk: clk@2 {
@@ -170,3 +186,16 @@
 	clocks = <&mii_phy_tx_clk>, <&gmac_int_tx_clk>;
 	clock-output-names = "gmac";
 };
+
+mmc_config_clk: clk@01c13000 {
+	compatible = "allwinner,sun9i-a80-mmc-config-clk";
+	reg = <0x01c13000 0x10>;
+	clocks = <&ahb0_gates 8>;
+	clock-names = "ahb";
+	resets = <&ahb0_resets 8>;
+	reset-names = "ahb";
+	#clock-cells = <1>;
+	#reset-cells = <1>;
+	clock-output-names = "mmc0_config", "mmc1_config",
+			     "mmc2_config", "mmc3_config";
+};
diff --git a/Documentation/devicetree/bindings/clock/ti,cdce706.txt b/Documentation/devicetree/bindings/clock/ti,cdce706.txt
new file mode 100644
index 0000000..616836e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti,cdce706.txt
@@ -0,0 +1,42 @@
+Bindings for Texas Instruments CDCE706 programmable 3-PLL clock
+synthesizer/multiplier/divider.
+
+Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
+
+I2C device node required properties:
+- compatible: shall be "ti,cdce706".
+- reg: i2c device address, shall be in range [0x68...0x6b].
+- #clock-cells: from common clock binding; shall be set to 1.
+- clocks: from common clock binding; list of parent clock
+  handles, shall be reference clock(s) connected to CLK_IN0
+  and CLK_IN1 pins.
+- clock-names: shall be clk_in0 and/or clk_in1. Use clk_in0
+  in case of crystal oscillator or differential signal input
+  configuration. Use clk_in0 and clk_in1 in case of independent
+  single-ended LVCMOS inputs configuration.
+
+Example:
+
+	clocks {
+		clk54: clk54 {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <54000000>;
+		};
+	};
+	...
+	i2c0: i2c-master@0d090000 {
+		...
+		cdce706: clock-synth@69 {
+			compatible = "ti,cdce706";
+			#clock-cells = <1>;
+			reg = <0x69>;
+			clocks = <&clk54>;
+			clock-names = "clk_in0";
+		};
+	};
+	...
+	simple-audio-card,codec {
+		...
+		clocks = <&cdce706 4>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/ti/fapll.txt b/Documentation/devicetree/bindings/clock/ti/fapll.txt
new file mode 100644
index 0000000..c19b3f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti/fapll.txt
@@ -0,0 +1,33 @@
+Binding for Texas Instruments FAPLL clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1]. It assumes a
+register-mapped FAPLL with usually two selectable input clocks
+(reference clock and bypass clock), and one or more child
+synthesizers.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "ti,dm816-fapll-clock"
+- #clock-cells : from common clock binding; shall be set to 0.
+- clocks : link phandles of parent clocks (clk-ref and clk-bypass)
+- reg : address and length of the register set for controlling the FAPLL.
+
+Examples:
+	main_fapll: main_fapll {
+		#clock-cells = <1>;
+		compatible = "ti,dm816-fapll-clock";
+		reg = <0x400 0x40>;
+		clocks = <&sys_clkin_ck &sys_clkin_ck>;
+		clock-indices = <1>, <2>, <3>, <4>, <5>,
+				<6>, <7>;
+		clock-output-names = "main_pll_clk1",
+				     "main_pll_clk2",
+				     "main_pll_clk3",
+				     "main_pll_clk4",
+				     "main_pll_clk5",
+				     "main_pll_clk6",
+				     "main_pll_clk7";
+	};
diff --git a/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt
new file mode 100644
index 0000000..81f982c
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt
@@ -0,0 +1,37 @@
+Broadcom iProc I2C controller
+
+Required properties:
+
+- compatible:
+    Must be "brcm,iproc-i2c"
+
+- reg:
+    Define the base and range of the I/O address space that contain the iProc
+    I2C controller registers
+
+- interrupts:
+    Should contain the I2C interrupt
+
+- clock-frequency:
+    This is the I2C bus clock. Needs to be either 100000 or 400000
+
+- #address-cells:
+    Always 1 (for I2C addresses)
+
+- #size-cells:
+    Always 0
+
+Example:
+	i2c0: i2c@18008000 {
+		compatible = "brcm,iproc-i2c";
+		reg = <0x18008000 0x100>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+		clock-frequency = <100000>;
+
+		codec: wm8750@1a {
+			compatible = "wlf,wm8750";
+			reg = <0x1a>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.txt b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
index 52d37fd..ce4311d 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
@@ -7,6 +7,7 @@
   - "fsl,vf610-i2c" for I2C compatible with the one integrated on Vybrid vf610 SoC
 - reg : Should contain I2C/HS-I2C registers location and length
 - interrupts : Should contain I2C/HS-I2C interrupt
+- clocks : Should contain the I2C/HS-I2C clock specifier
 
 Optional properties:
- clock-frequency : Contains desired I2C/HS-I2C bus clock frequency in Hz.
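
A hedged sketch of an I2C node carrying the now-required clocks property; the
unit address, interrupt number and clock specifier below are illustrative
placeholders rather than values defined by this binding:

	i2c@83fc4000 {
		compatible = "fsl,imx21-i2c";
		reg = <0x83fc4000 0x4000>;
		interrupts = <63>;
		clocks = <&clks 135>;		/* placeholder clock specifier */
		clock-frequency = <100000>;	/* optional bus frequency in Hz */
	};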
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
index 34a3fb6..cf53d5f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
@@ -16,6 +16,9 @@
 Optional Properties:
 
   - reset-gpios: Reference to the GPIO connected to the reset input.
+  - i2c-mux-idle-disconnect: Boolean; if defined, forces the mux to disconnect all
+    children in the idle state. This is necessary, for example, if there are several
+    multiplexers on the bus and the devices behind them use the same I2C addresses.
 
 
 Example:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
index 1637c29..17bef9a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
@@ -4,16 +4,34 @@
 - compatible      : "opencores,i2c-ocores" or "aeroflexgaisler,i2cmst"
 - reg             : bus address start and address range size of device
 - interrupts      : interrupt number
-- clock-frequency : frequency of bus clock in Hz
+- clocks          : handle to the controller clock; see the note below.
+                    Mutually exclusive with opencores,ip-clock-frequency
+- opencores,ip-clock-frequency: frequency of the controller clock in Hz;
+                    see the note below. Mutually exclusive with clocks
 - #address-cells  : should be <1>
 - #size-cells     : should be <0>
 
 Optional properties:
+- clock-frequency : frequency of bus clock in Hz; see the note below.
+                    Defaults to 100 KHz when the property is not specified
 - reg-shift       : device register offsets are shifted by this value
 - reg-io-width    : io register width in bytes (1, 2 or 4)
 - regstep         : deprecated, use reg-shift above
 
-Example:
+Note
+clock-frequency property is meant to control the bus frequency for i2c bus
+drivers, but it was incorrectly used to specify i2c controller input clock
+frequency. So the following rules are set to fix this situation:
+- if clock-frequency is present and neither opencores,ip-clock-frequency nor
+  clocks are, then clock-frequency specifies i2c controller clock frequency.
+  This is to keep backwards compatibility with setups using old DTB. i2c bus
+  frequency is fixed at 100 KHz.
+- if clocks is present it specifies i2c controller clock. clock-frequency
+  property specifies i2c bus frequency.
+- if opencores,ip-clock-frequency is present it specifies i2c controller
+  clock frequency. clock-frequency property specifies i2c bus frequency.
+
+Examples:
 
 	i2c0: ocores@a0000000 {
 		#address-cells = <1>;
@@ -21,7 +39,25 @@
 		compatible = "opencores,i2c-ocores";
 		reg = <0xa0000000 0x8>;
 		interrupts = <10>;
-		clock-frequency = <20000000>;
+		opencores,ip-clock-frequency = <20000000>;
+
+		reg-shift = <0>;	/* 8 bit registers */
+		reg-io-width = <1>;	/* 8 bit read/write */
+
+		dummy@60 {
+			compatible = "dummy";
+			reg = <0x60>;
+		};
+	};
+or
+	i2c0: ocores@a0000000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "opencores,i2c-ocores";
+		reg = <0xa0000000 0x8>;
+		interrupts = <10>;
+		clocks = <&osc>;
+		clock-frequency = <400000>; /* i2c bus frequency 400 KHz */
 
 		reg-shift = <0>;	/* 8 bit registers */
 		reg-io-width = <1>;	/* 8 bit read/write */
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
index dde6c22..f0d71bc 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
@@ -21,6 +21,17 @@
 Optional properties :
 
  - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used.
+ - i2c-scl-rising-time-ns : Number of nanoseconds the SCL signal takes to rise
+	(t(r) in the I2C specification). If not specified, this is assumed to
+	be the maximum the specification allows (1000 ns for Standard-mode,
+	300 ns for Fast-mode), which might cause slightly slower communication.
+ - i2c-scl-falling-time-ns : Number of nanoseconds the SCL signal takes to fall
+	(t(f) in the I2C specification). If not specified this is assumed to
+	be the maximum the specification allows (300 ns) which might cause
+	slightly slower communication.
+ - i2c-sda-falling-time-ns : Number of nanoseconds the SDA signal takes to fall
+	(t(f) in the I2C specification). If not specified we'll use the SCL
+	value since they are the same in nearly all cases.
 
 Example:
 
@@ -39,4 +50,7 @@
 
 	clock-names = "i2c";
 	clocks = <&cru PCLK_I2C0>;
+
+	i2c-scl-rising-time-ns = <800>;
+	i2c-scl-falling-time-ns = <100>;
 };
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 4dcd88d..aaa8325 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -61,9 +61,8 @@
 gmt,g751		G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
 infineon,slb9635tt	Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
 infineon,slb9645tt	Infineon SLB9645 I2C TPM (new protocol, max 400khz)
-isl,isl12057		Intersil ISL12057 I2C RTC Chip
-isil,isl29028           (deprecated, use isl)
-isl,isl29028            Intersil ISL29028 Ambient Light and Proximity Sensor
+isil,isl12057		Intersil ISL12057 I2C RTC Chip
+isil,isl29028		Intersil ISL29028 Ambient Light and Proximity Sensor
 maxim,ds1050		5 Bit Programmable, Pulse-Width Modulator
 maxim,max1237		Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
 maxim,max6625		9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
diff --git a/Documentation/devicetree/bindings/mips/cavium/cib.txt b/Documentation/devicetree/bindings/mips/cavium/cib.txt
new file mode 100644
index 0000000..f39a1aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cavium/cib.txt
@@ -0,0 +1,43 @@
+* Cavium Interrupt Bus widget
+
+Properties:
+- compatible: "cavium,octeon-7130-cib"
+
+  Compatibility with cn70XX SoCs.
+
+- interrupt-controller:  This is an interrupt controller.
+
+- reg: Two elements consisting of the addresses of the RAW and EN
+  registers of the CIB block
+
+- cavium,max-bits: The index (zero based) of the highest numbered bit
+  in the CIB block.
+
+- interrupt-parent:  Always the CIU on the SoC.
+
+- interrupts: The CIU line to which the CIB block is connected.
+
+- #interrupt-cells: Must be <2>.  The first cell is the bit within the
+   CIB.  The second cell specifies the triggering semantics of the
+   line.
+
+Example:
+
+	interrupt-controller@107000000e000 {
+		compatible = "cavium,octeon-7130-cib";
+		reg = <0x10700 0x0000e000 0x0 0x8>, /* RAW */
+		      <0x10700 0x0000e100 0x0 0x8>; /* EN */
+		cavium,max-bits = <23>;
+
+		interrupt-controller;
+		interrupt-parent = <&ciu>;
+		interrupts = <1 24>;
+		/* Interrupts are specified by two parts:
+		 * 1) Bit number in the CIB* registers
+		 * 2) Triggering (1 - edge rising
+		 *		  2 - edge falling
+		 *		  4 - level active high
+		 *		  8 - level active low)
+		 */
+		#interrupt-cells = <2>;
+	};
diff --git a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
index 91b3a34..4bf41d8 100644
--- a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
@@ -10,8 +10,8 @@
 Required properties:
  - compatible : "allwinner,sun4i-a10-mmc" or "allwinner,sun5i-a13-mmc"
  - reg : mmc controller base registers
- - clocks : a list with 2 phandle + clock specifier pairs
- - clock-names : must contain "ahb" and "mmc"
+ - clocks : a list with 4 phandle + clock specifier pairs
+ - clock-names : must contain "ahb", "mmc", "output" and "sample"
  - interrupts : mmc controller interrupt
 
 Optional properties:
@@ -25,8 +25,8 @@
 	mmc0: mmc@01c0f000 {
 		compatible = "allwinner,sun5i-a13-mmc";
 		reg = <0x01c0f000 0x1000>;
-		clocks = <&ahb_gates 8>, <&mmc0_clk>;
-		clock-names = "ahb", "mod";
+		clocks = <&ahb_gates 8>, <&mmc0_clk>, <&mmc0_output_clk>, <&mmc0_sample_clk>;
+		clock-names = "ahb", "mod", "output", "sample";
 		interrupts = <0 32 4>;
 		status = "disabled";
 	};
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
index 33df393..8db3238 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
@@ -27,6 +27,8 @@
 - amd,serdes-cdr-rate: CDR rate speed selection
 - amd,serdes-pq-skew: PQ (data sampling) skew
 - amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable
 
 Example:
 	xgbe_phy@e1240800 {
@@ -41,4 +43,6 @@
 		amd,serdes-cdr-rate = <2>, <2>, <7>;
 		amd,serdes-pq-skew = <10>, <10>, <30>;
 		amd,serdes-tx-amp = <15>, <15>, <10>;
+		amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+		amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
 	};
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index cfcc527..6151999 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -4,7 +4,10 @@
 APM X-Gene SoC.
 
 Required properties for all the ethernet interfaces:
-- compatible: Should be "apm,xgene-enet"
+- compatible: Should state binding information from the following list,
+  - "apm,xgene-enet":    RGMII based 1G interface
+  - "apm,xgene1-sgenet": SGMII based 1G interface
+  - "apm,xgene1-xgenet": XFI based 10G interface
 - reg: Address and length of the register set for the device. It contains the
   information of registers in the same order as described by reg-names
 - reg-names: Should contain the register set names
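
For orientation, a hedged sketch of an ethernet node using one of the new
compatible strings; the addresses, interrupt and clock specifiers, MAC address
and register-set names here are illustrative assumptions, not values defined
by this hunk:

	sgenet0: ethernet@1f210000 {
		compatible = "apm,xgene1-sgenet";
		reg = <0x0 0x1f210000 0x0 0xd100>,
		      <0x0 0x1f200000 0x0 0x400>,
		      <0x0 0x1b000000 0x0 0x200>;
		reg-names = "enet_csr", "ring_csr", "ring_cmd";
		interrupts = <0x0 0xa0 0x4>;
		clocks = <&sge0clk 0>;
		local-mac-address = [00 01 73 00 00 02];
		phy-connection-type = "sgmii";
	};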
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 98c1667..0f8ed37 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -19,6 +19,16 @@
    providing multiple PM domains (e.g. power controllers), but can be any value
    as specified by device tree binding documentation of particular provider.
 
+Optional properties:
+ - power-domains : A phandle and PM domain specifier as defined by bindings of
+                   the power controller specified by phandle.
+   Some power domains might be powered from another power domain (or have
+   other hardware specific dependencies). For representing such dependency
+   a standard PM domain consumer binding is used. When provided, all domains
+   created by the given provider should be subdomains of the domain
+   specified by this binding. More details about power domain specifier are
+   available in the next section.
+
 Example:
 
 	power: power-controller@12340000 {
@@ -30,6 +40,25 @@
 The node above defines a power controller that is a PM domain provider and
 expects one cell as its phandle argument.
 
+Example 2:
+
+	parent: power-controller@12340000 {
+		compatible = "foo,power-controller";
+		reg = <0x12340000 0x1000>;
+		#power-domain-cells = <1>;
+	};
+
+	child: power-controller@12341000 {
+		compatible = "foo,power-controller";
+		reg = <0x12341000 0x1000>;
+		power-domains = <&parent 0>;
+		#power-domain-cells = <1>;
+	};
+
+The nodes above define two power controllers: 'parent' and 'child'.
+Domains created by the 'child' power controller are subdomains of the '0' power
+domain provided by the 'parent' power controller.
+
 ==PM domain consumers==
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/8250.txt
similarity index 100%
rename from Documentation/devicetree/bindings/serial/of-serial.txt
rename to Documentation/devicetree/bindings/serial/8250.txt
diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
new file mode 100644
index 0000000..ebcbb62
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
@@ -0,0 +1,19 @@
+ETRAX FS UART
+
+Required properties:
+- compatible : "axis,etraxfs-uart"
+- reg: offset and length of the register set for the device.
+- interrupts: device interrupt
+
+Optional properties:
+- {dtr,dsr,ri,cd}-gpios: specify a GPIO for DTR/DSR/RI/CD
+  line respectively.
+
+Example:
+
+serial@b0026000 {
+	compatible = "axis,etraxfs-uart";
+	reg = <0xb0026000 0x1000>;
+	interrupts = <68>;
+	status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
index 7f76214..289c40e 100644
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
@@ -21,6 +21,18 @@
 - reg-io-width : the size (in bytes) of the IO accesses that should be
   performed on the device.  If this property is not present then single byte
   accesses are used.
+- dcd-override : Override the DCD modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- dsr-override : Override the DSR modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- cts-override : Override the CTS modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- ri-override : Override the RI modem status signal. This signal will always be
+  reported as inactive instead of being obtained from the modem status register.
+  Define this if your serial port does not use this pin.
 
 Example:
 
@@ -31,6 +43,10 @@
 		interrupts = <10>;
 		reg-shift = <2>;
 		reg-io-width = <4>;
+		dcd-override;
+		dsr-override;
+		cts-override;
+		ri-override;
 	};
 
 Example with one clock:
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index 56742bc..7d44eae 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -12,6 +12,9 @@
 
        devicetree@vger.kernel.org
 
+     and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify
+     all of the DT maintainers.
+
   3) The Documentation/ portion of the patch should come in the series before
      the code implementing the binding.
 
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 389ca13..fae26d0 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -20,6 +20,7 @@
 ams	AMS AG
 amstaos	AMS-Taos Inc.
 apm	Applied Micro Circuits Corporation (APM)
+arasan	Arasan Chip Systems
 arm	ARM Ltd.
 armadeus	ARMadeus Systems SARL
 asahi-kasei	Asahi Kasei Corp.
@@ -27,6 +28,7 @@
 auo	AU Optronics Corporation
 avago	Avago Technologies
 avic	Shanghai AVIC Optoelectronics Co., Ltd.
+axis	Axis Communications AB
 bosch	Bosch Sensortec GmbH
 brcm	Broadcom Corporation
 buffalo	Buffalo, Inc.
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
index f90e294..a4d8697 100644
--- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
@@ -26,6 +26,11 @@
 - atmel,disable : Should be present if you want to disable the watchdog.
 - atmel,idle-halt : Should be present if you want to stop the watchdog when
 	entering idle state.
+	CAUTION: This property should be used with care; it actually makes the
+	watchdog stop counting when the CPU is in idle state, therefore the
+	watchdog reset time depends on mean CPU usage and the watchdog will not
+	reset at all if the CPU stops working while in idle state, which is
+	probably not what you want.
 - atmel,dbg-halt : Should be present if you want to stop the watchdog when
 	entering debug state.
 
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 2ca3d17..f91926f 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -164,8 +164,6 @@
 
 --------------------------- file_system_type ---------------------------
 prototypes:
-	int (*get_sb) (struct file_system_type *, int,
-		       const char *, void *, struct vfsmount *);
 	struct dentry *(*mount) (struct file_system_type *, int,
 		       const char *, void *);
 	void (*kill_sb) (struct super_block *);
diff --git a/Documentation/filesystems/dlmfs.txt b/Documentation/filesystems/dlmfs.txt
index 1b528b2..fcf4d50 100644
--- a/Documentation/filesystems/dlmfs.txt
+++ b/Documentation/filesystems/dlmfs.txt
@@ -5,8 +5,8 @@
 
 dlmfs is built with OCFS2 as it requires most of its infrastructure.
 
-Project web page:    http://oss.oracle.com/projects/ocfs2
-Tools web page:      http://oss.oracle.com/projects/ocfs2-tools
+Project web page:    http://ocfs2.wiki.kernel.org
+Tools web page:      https://github.com/markfasheh/ocfs2-tools
 OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
 
 All code copyright 2005 Oracle except when otherwise noted.
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index 28f8c08..4c49e54 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -8,8 +8,8 @@
 You'll want to install the ocfs2-tools package in order to at least
 get "mount.ocfs2" and "ocfs2_hb_ctl".
 
-Project web page:    http://oss.oracle.com/projects/ocfs2
-Tools web page:      http://oss.oracle.com/projects/ocfs2-tools
+Project web page:    http://ocfs2.wiki.kernel.org
+Tools git tree:      https://github.com/markfasheh/ocfs2-tools
 OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
 
 All code copyright 2005 Oracle except when otherwise noted.
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index a27c950..6db0e5d 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -159,6 +159,22 @@
 rename or unlink will of course be noticed and handled).
 
 
+Multiple lower layers
+---------------------
+
+Multiple lower layers can now be given using the colon (":") as a
+separator character between the directory names.  For example:
+
+  mount -t overlay overlay -olowerdir=/lower1:/lower2:/lower3 /merged
+
+As the example shows, "upperdir=" and "workdir=" may be omitted.  In
+that case the overlay will be read-only.
+
+The specified lower directories will be stacked beginning from the
+rightmost one and going left.  In the above example lower1 will be the
+top, lower2 the middle and lower3 the bottom layer.
+
+
 Non-standard behavior
 ---------------------
 
@@ -196,3 +212,15 @@
 filesystem are not allowed.  If the underlying filesystem is changed,
 the behavior of the overlay is undefined, though it will not result in
 a crash or deadlock.
+
+Testsuite
+---------
+
+There's a testsuite developed by David Howells at:
+
+  git://git.infradead.org/users/dhowells/unionmount-testsuite.git
+
+Run as root:
+
+  # cd unionmount-testsuite
+  # ./run --ov
diff --git a/Documentation/i2c/functionality b/Documentation/i2c/functionality
index 4556a3e..4aae8ed 100644
--- a/Documentation/i2c/functionality
+++ b/Documentation/i2c/functionality
@@ -12,7 +12,7 @@
 -----------------------
 
 For the most up-to-date list of functionality constants, please check
-<linux/i2c.h>!
+<uapi/linux/i2c.h>!
 
   I2C_FUNC_I2C                    Plain i2c-level commands (Pure SMBus
                                   adapters typically can not do these)
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index 90bca6f..a63e5e0 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -3,8 +3,8 @@
 
 Introduction
 ------------
-Currently the ALPS touchpad driver supports five protocol versions in use by
-ALPS touchpads, called versions 1, 2, 3, 4 and 5.
+Currently the ALPS touchpad driver supports seven protocol versions in use by
+ALPS touchpads, called versions 1, 2, 3, 4, 5, 6 and 7.
 
 Since roughly mid-2010 several new ALPS touchpads have been released and
 integrated into a variety of laptops and netbooks.  These new touchpads
@@ -240,3 +240,67 @@
  byte 3:    0  x23  x22   x21 x20  x19  x18   x17
  byte 4:    0   x9   x8    x7  x6   x5   x4    x3
  byte 5:    0  x16  x15   x14 x13  x12  x11   x10
+
+ALPS Absolute Mode - Protocol Version 6
+---------------------------------------
+
+For trackstick packet, the format is:
+
+ byte 0:    1    1    1    1    1    1    1    1
+ byte 1:    0   X6   X5   X4   X3   X2   X1   X0
+ byte 2:    0   Y6   Y5   Y4   Y3   Y2   Y1   Y0
+ byte 3:    ?   Y7   X7    ?    ?    M    R    L
+ byte 4:   Z7   Z6   Z5   Z4   Z3   Z2   Z1   Z0
+ byte 5:    0    1    1    1    1    1    1    1
+
+For touchpad packet, the format is:
+
+ byte 0:    1    1    1    1    1    1    1    1
+ byte 1:    0    0    0    0   x3   x2   x1   x0
+ byte 2:    0    0    0    0   y3   y2   y1   y0
+ byte 3:    ?   x7   x6   x5   x4    ?    r    l
+ byte 4:    ?   y7   y6   y5   y4    ?    ?    ?
+ byte 5:   z7   z6   z5   z4   z3   z2   z1   z0
+
+(v6 touchpad does not have middle button)
+
+ALPS Absolute Mode - Protocol Version 7
+---------------------------------------
+
+For trackstick packet, the format is:
+
+ byte 0:    0    1    0    0    1    0    0    0
+ byte 1:    1    1    *    *    1    M    R    L
+ byte 2:   X7    1   X5   X4   X3   X2   X1   X0
+ byte 3:   Z6    1   Y6   X6    1   Y2   Y1   Y0
+ byte 4:   Y7    0   Y5   Y4   Y3    1    1    0
+ byte 5:  T&P    0   Z5   Z4   Z3   Z2   Z1   Z0
+
+For touchpad packet, the format is:
+
+         packet-fmt     b7     b6     b5     b4     b3     b2     b1     b0
+ byte 0: TWO & MULTI     L      1      R      M      1   Y0-2   Y0-1   Y0-0
+ byte 0: NEW             L      1   X1-5      1      1   Y0-2   Y0-1   Y0-0
+ byte 1:             Y0-10   Y0-9   Y0-8   Y0-7   Y0-6   Y0-5   Y0-4   Y0-3
+ byte 2:             X0-11      1  X0-10   X0-9   X0-8   X0-7   X0-6   X0-5
+ byte 3:             X1-11      1   X0-4   X0-3      1   X0-2   X0-1   X0-0
+ byte 4: TWO         X1-10    TWO   X1-9   X1-8   X1-7   X1-6   X1-5   X1-4
+ byte 4: MULTI       X1-10    TWO   X1-9   X1-8   X1-7   X1-6   Y1-5      1
+ byte 4: NEW         X1-10    TWO   X1-9   X1-8   X1-7   X1-6      0      0
+ byte 5: TWO & NEW   Y1-10      0   Y1-9   Y1-8   Y1-7   Y1-6   Y1-5   Y1-4
+ byte 5: MULTI       Y1-10      0   Y1-9   Y1-8   Y1-7   Y1-6    F-1    F-0
+
+ L:         Left button
+ R / M:     Non-clickpads: Right / Middle button
+            Clickpads: When > 2 fingers are down, and some fingers
+            are in the button area, then the 2 coordinates reported
+            are for fingers outside the button area and these report
+            extra fingers being present in the right / left button
+            area. Note these fingers are not added to the F field!
+            so if a TWO packet is received and R = 1 then there are
+            3 fingers down, etc.
+ TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
+            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
+               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
+               in NEW fmt
+ F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
diff --git a/Documentation/power/suspend-and-interrupts.txt b/Documentation/power/suspend-and-interrupts.txt
index 2f9c5a5..8afb29a 100644
--- a/Documentation/power/suspend-and-interrupts.txt
+++ b/Documentation/power/suspend-and-interrupts.txt
@@ -40,8 +40,10 @@
 
 The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when
 requesting a special-purpose interrupt.  It causes suspend_device_irqs() to
-leave the corresponding IRQ enabled so as to allow the interrupt to work all
-the time as expected.
+leave the corresponding IRQ enabled so as to allow the interrupt to work as
+expected during the suspend-resume cycle, but does not guarantee that the
+interrupt will wake the system from a suspended state -- for such cases it is
+necessary to use enable_irq_wake().
 
 Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one
 user of it.  Thus, if the IRQ is shared, all of the interrupt handlers installed
@@ -110,8 +112,9 @@
 IRQF_NO_SUSPEND and enable_irq_wake()
 -------------------------------------
 
-There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND
-flag on the same IRQ.
+There are very few valid reasons to use both enable_irq_wake() and the
+IRQF_NO_SUSPEND flag on the same IRQ, and it is never valid to use both for the
+same device.
 
 First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND
 interrupts (interrupt handlers are invoked after suspend_device_irqs()) are
@@ -120,4 +123,13 @@
 
 Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not
 to individual interrupt handlers, so sharing an IRQ between a system wakeup
-interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense.
+interrupt source and an IRQF_NO_SUSPEND interrupt source does not generally
+make sense.
+
+In rare cases an IRQ can be shared between a wakeup device driver and an
+IRQF_NO_SUSPEND user. In order for this to be safe, the wakeup device driver
+must be able to discern spurious IRQs from genuine wakeup events (signalling
+the latter to the core with pm_system_wakeup()), must use enable_irq_wake() to
+ensure that the IRQ will function as a wakeup source, and must request the IRQ
+with IRQF_COND_SUSPEND to tell the core that it meets these requirements. If
+these requirements are not met, it is not valid to use IRQF_COND_SUSPEND.
diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt
index 199f453..82fbdbc 100644
--- a/Documentation/x86/zero-page.txt
+++ b/Documentation/x86/zero-page.txt
@@ -3,7 +3,7 @@
 real-mode setup code of the kernel. References/settings to it mainly
 are in:
 
-  arch/x86/include/asm/bootparam.h
+  arch/x86/include/uapi/asm/bootparam.h
 
 
 Offset	Proto	Name		Meaning
diff --git a/MAINTAINERS b/MAINTAINERS
index 7cfcee4..9a76a40 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1030,6 +1030,16 @@
 F:	arch/arm/boot/dts/imx*
 F:	arch/arm/configs/imx*_defconfig
 
+ARM/FREESCALE VYBRID ARM ARCHITECTURE
+M:	Shawn Guo <shawn.guo@linaro.org>
+M:	Sascha Hauer <kernel@pengutronix.de>
+R:	Stefan Agner <stefan@agner.ch>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
+F:	arch/arm/mach-imx/*vf610*
+F:	arch/arm/boot/dts/vf*
+
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1188,6 +1198,7 @@
 M:	Jason Cooper <jason@lakedaemon.net>
 M:	Andrew Lunn <andrew@lunn.ch>
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+M:	Gregory Clement <gregory.clement@free-electrons.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-dove/
@@ -1730,7 +1741,7 @@
 F:	drivers/net/ethernet/atheros/
 
 ATM
-M:	Chas Williams <chas@cmf.nrl.navy.mil>
+M:	Chas Williams <3chas3@gmail.com>
 L:	linux-atm-general@lists.sourceforge.net (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 W:	http://linux-atm.sourceforge.net
@@ -2065,7 +2076,7 @@
 BONDING DRIVER
 M:	Jay Vosburgh <j.vosburgh@gmail.com>
 M:	Veaceslav Falico <vfalico@gmail.com>
-M:	Andy Gospodarek <andy@greyhouse.net>
+M:	Andy Gospodarek <gospo@cumulusnetworks.com>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
@@ -2107,7 +2118,6 @@
 
 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:	Christian Daudt <bcm@fixthebug.org>
-M:	Matt Porter <mporter@linaro.org>
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	bcm-kernel-feedback-list@broadcom.com
 T:	git git://github.com/broadcom/mach-bcm
@@ -2369,8 +2379,9 @@
 
 CAN NETWORK LAYER
 M:	Oliver Hartkopp <socketcan@hartkopp.net>
+M:	Marc Kleine-Budde <mkl@pengutronix.de>
 L:	linux-can@vger.kernel.org
-W:	http://gitorious.org/linux-can
+W:	https://github.com/linux-can
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S:	Maintained
@@ -2386,7 +2397,7 @@
 M:	Wolfgang Grandegger <wg@grandegger.com>
 M:	Marc Kleine-Budde <mkl@pengutronix.de>
 L:	linux-can@vger.kernel.org
-W:	http://gitorious.org/linux-can
+W:	https://github.com/linux-can
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S:	Maintained
@@ -3937,7 +3948,7 @@
 F:	drivers/staging/fbtft/
 
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
-M:	Robert Love <robert.w.love@intel.com>
+M:	Vasu Dev <vasu.dev@intel.com>
 L:	fcoe-devel@open-fcoe.org
 W:	www.Open-FCoE.org
 S:	Supported
@@ -7213,8 +7224,7 @@
 M:	Mark Fasheh <mfasheh@suse.com>
 M:	Joel Becker <jlbec@evilplan.org>
 L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
-W:	http://oss.oracle.com/projects/ocfs2/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
+W:	http://ocfs2.wiki.kernel.org
 S:	Supported
 F:	Documentation/filesystems/ocfs2.txt
 F:	Documentation/filesystems/dlmfs.txt
@@ -8481,6 +8491,14 @@
 L:	netdev@vger.kernel.org
 F:	drivers/net/ethernet/samsung/sxgbe/
 
+SAMSUNG THERMAL DRIVER
+M:	Lukasz Majewski <l.majewski@samsung.com>
+L:	linux-pm@vger.kernel.org
+L:	linux-samsung-soc@vger.kernel.org
+S:	Supported
+T:	https://github.com/lmajewski/linux-samsung-thermal.git
+F:	drivers/thermal/samsung/
+
 SAMSUNG USB2 PHY DRIVER
 M:	Kamil Debski <k.debski@samsung.com>
 L:	linux-kernel@vger.kernel.org
@@ -8567,7 +8585,7 @@
 F:	drivers/scsi/sr*
 
 SCSI RDMA PROTOCOL (SRP) INITIATOR
-M:	Bart Van Assche <bvanassche@acm.org>
+M:	Bart Van Assche <bart.vanassche@sandisk.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 W:	http://www.openfabrics.org
@@ -9719,6 +9737,11 @@
 S:	Maintained
 F:	drivers/thermal/ti-soc-thermal/
 
+TI CDCE706 CLOCK DRIVER
+M:	Max Filippov <jcmvbkbc@gmail.com>
+S:	Maintained
+F:	drivers/clk/clk-cdce706.c
+
 TI CLOCK DRIVER
 M:	Tero Kristo <t-kristo@ti.com>
 L:	linux-omap@vger.kernel.org
diff --git a/Makefile b/Makefile
index 19e256a..e734965 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
-VERSION = 3
-PATCHLEVEL = 19
+VERSION = 4
+PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION =
-NAME = Diseased Newt
+EXTRAVERSION = -rc4
+NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 4e54729..52312cb 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -47,9 +47,6 @@
 /* Forward declaration, a strange C thing */
 struct task_struct;
 
-/* Return saved PC of a blocked thread  */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
 
@@ -72,18 +69,21 @@
 #define release_segments(mm)        do { } while (0)
 
 #define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
+#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
  * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
-#define KSTK_ESP(tsk)   (tsk->thread.ksp)
+#define TSK_K_ESP(tsk)		(tsk->thread.ksp)
 
-#define KSTK_REG(tsk, off)	(*((unsigned int *)(KSTK_ESP(tsk) + \
+#define TSK_K_REG(tsk, off)	(*((unsigned int *)(TSK_K_ESP(tsk) + \
 					sizeof(struct callee_regs) + off)))
 
-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
-#define KSTK_FP(tsk)    KSTK_REG(tsk, 0)
+#define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
+#define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)
+
+#define thread_saved_pc(tsk)	TSK_K_BLINK(tsk)
 
 extern void start_thread(struct pt_regs * regs, unsigned long pc,
 			 unsigned long usp);
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h
new file mode 100644
index 0000000..b29b606
--- /dev/null
+++ b/arch/arc/include/asm/stacktrace.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <linux/sched.h>
+
+/**
+ * arc_unwind_core - Unwind the kernel mode stack for an execution context
+ * @tsk:		NULL for current task, specific task otherwise
+ * @regs:		pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
+ * 			If NULL, use pt_regs of @tsk (if !NULL) otherwise
+ * 			use the current values of {SP, FP, BLINK, PC}
+ * @consumer_fn:	Callback invoked for each frame unwound
+ * 			Returns 0 to continue unwinding, -1 to stop
+ * @arg:		Arg to callback
+ *
+ * Returns the address of first function in stack
+ *
+ * Semantics:
+ *  - synchronous unwinding (e.g. dump_stack): @tsk  NULL, @regs  NULL
+ *  - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs  NULL
+ *  - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL
+ */
+notrace noinline unsigned int arc_unwind_core(
+	struct task_struct *tsk, struct pt_regs *regs,
+	int (*consumer_fn) (unsigned int, void *),
+	void *arg);
+
+#endif /* __ASM_STACKTRACE_H */
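
For reference, a minimal sketch (not part of the patch) of a consumer callback that follows the contract documented in the kernel-doc above: it is invoked once per unwound frame and returns 0 to keep unwinding or -1 to stop. The names pc_match, match_pc_cb and pc_is_on_stack are invented for the example.

/*
 * Illustrative only, not part of this series: a consumer callback for
 * arc_unwind_core() that stops once a given PC is seen on the stack.
 */
struct pc_match {
	unsigned int target;	/* PC we are looking for */
	int found;
};

static int match_pc_cb(unsigned int address, void *arg)
{
	struct pc_match *m = arg;

	if (address == m->target) {
		m->found = 1;
		return -1;	/* stop unwinding */
	}
	return 0;		/* continue to the next frame */
}

/* Typical call site: walk the current task's kernel stack. */
static int pc_is_on_stack(unsigned int pc)
{
	struct pc_match m = { .target = pc, .found = 0 };

	arc_unwind_core(NULL, NULL, match_pc_cb, &m);
	return m.found;
}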
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index fdd8971..98c00a2 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -192,29 +192,6 @@
 	return 0;
 }
 
-/*
- * API: expected by schedular Code: If thread is sleeping where is that.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- * So we hard code that anyways.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	struct pt_regs *regs = task_pt_regs(t);
-	unsigned long blink = 0;
-
-	/*
-	 * If the thread being queried for in not itself calling this, then it
-	 * implies it is not executing, which in turn implies it is sleeping,
-	 * which in turn implies it got switched OUT by the schedular.
-	 * In that case, it's kernel mode blink can reliably retrieved as per
-	 * the picture above (right above pt_regs).
-	 */
-	if (t != current && t->state != TASK_RUNNING)
-		blink = *((unsigned int *)regs - 1);
-
-	return blink;
-}
-
 int elf_check_arch(const struct elf32_hdr *x)
 {
 	unsigned int eflags;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 9ce47cf..92320d6 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -43,6 +43,10 @@
 				   struct pt_regs *regs,
 				   struct unwind_frame_info *frame_info)
 {
+	/*
+	 * synchronous unwinding (e.g. dump_stack)
+	 *  - uses current values of SP and friends
+	 */
 	if (tsk == NULL && regs == NULL) {
 		unsigned long fp, sp, blink, ret;
 		frame_info->task = current;
@@ -61,12 +65,17 @@
 		frame_info->regs.r63 = ret;
 		frame_info->call_frame = 0;
 	} else if (regs == NULL) {
+		/*
+		 * Asynchronous unwinding of sleeping task
+		 *  - Gets SP etc from task's pt_regs (saved bottom of kernel
+		 *    mode stack of task)
+		 */
 
 		frame_info->task = tsk;
 
-		frame_info->regs.r27 = KSTK_FP(tsk);
-		frame_info->regs.r28 = KSTK_ESP(tsk);
-		frame_info->regs.r31 = KSTK_BLINK(tsk);
+		frame_info->regs.r27 = TSK_K_FP(tsk);
+		frame_info->regs.r28 = TSK_K_ESP(tsk);
+		frame_info->regs.r31 = TSK_K_BLINK(tsk);
 		frame_info->regs.r63 = (unsigned int)__switch_to;
 
 		/* In the prologue of __switch_to, first FP is saved on stack
@@ -83,6 +92,10 @@
 		frame_info->call_frame = 0;
 
 	} else {
+		/*
+		 * Asynchronous unwinding of intr/exception
+		 *  - Just uses the pt_regs passed
+		 */
 		frame_info->task = tsk;
 
 		frame_info->regs.r27 = regs->fp;
@@ -95,7 +108,7 @@
 
 #endif
 
-static noinline unsigned int
+notrace noinline unsigned int
 arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
 		int (*consumer_fn) (unsigned int, void *), void *arg)
 {
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 7ff5b5c..74db59b 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <asm/disasm.h>
@@ -253,6 +254,7 @@
 		}
 	}
 
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 	return 0;
 
 fault:
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 563cb27..6a2e006 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -14,6 +14,7 @@
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/perf_event.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
@@ -139,13 +140,20 @@
 			return;
 	}
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 	if (likely(!(fault & VM_FAULT_ERROR))) {
 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
 			/* To avoid updating stats twice for retry case */
-			if (fault & VM_FAULT_MAJOR)
+			if (fault & VM_FAULT_MAJOR) {
 				tsk->maj_flt++;
-			else
+				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+					      regs, address);
+			} else {
 				tsk->min_flt++;
+				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+					      regs, address);
+			}
 
 			if (fault & VM_FAULT_RETRY) {
 				flags &= ~FAULT_FLAG_ALLOW_RETRY;
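
For reference, the software events wired up above can be read from userspace through the standard perf_event_open(2) interface. The sketch below (not part of the patch) counts minor page faults for the calling process; swapping the config value for PERF_COUNT_SW_PAGE_FAULTS_MAJ or PERF_COUNT_SW_ALIGNMENT_FAULTS counts the other events added in this series. Error handling is omitted for brevity.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

/* No glibc wrapper exists, so call the syscall directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_PAGE_FAULTS_MIN;	/* or _MAJ, ALIGNMENT_FAULTS */
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this process, any CPU */
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload of interest here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("minor page faults: %lld\n", count);
	return 0;
}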
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7f99cd6..eb7bb51 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -150,6 +150,7 @@
 machine-$(CONFIG_ARCH_CLPS711X)		+= clps711x
 machine-$(CONFIG_ARCH_CNS3XXX)		+= cns3xxx
 machine-$(CONFIG_ARCH_DAVINCI)		+= davinci
+machine-$(CONFIG_ARCH_DIGICOLOR)	+= digicolor
 machine-$(CONFIG_ARCH_DOVE)		+= dove
 machine-$(CONFIG_ARCH_EBSA110)		+= ebsa110
 machine-$(CONFIG_ARCH_EFM32)		+= efm32
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index 6cc25ed..c3255e0 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -195,6 +195,7 @@
 
 &usb0 {
 	status = "okay";
+	dr_mode = "peripheral";
 };
 
 &usb1 {
@@ -300,3 +301,11 @@
 	cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
 	cd-inverted;
 };
+
+&aes {
+	status = "okay";
+};
+
+&sham {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index 83d40f7..6b849372 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -24,11 +24,3 @@
 &mmc1 {
 	vmmc-supply = <&ldo3_reg>;
 };
-
-&sham {
-	status = "okay";
-};
-
-&aes {
-	status = "okay";
-};
diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts
index 7266a00..5c5667a 100644
--- a/arch/arm/boot/dts/am335x-lxm.dts
+++ b/arch/arm/boot/dts/am335x-lxm.dts
@@ -328,6 +328,10 @@
 	dual_emac_res_vlan = <3>;
 };
 
+&phy_sel {
+	rmii-clock-ext;
+};
+
 &mac {
 	pinctrl-names = "default", "sleep";
 	pinctrl-0 = <&cpsw_default>;
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi
index 712edce..071b56a 100644
--- a/arch/arm/boot/dts/am33xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am33xx-clocks.dtsi
@@ -99,7 +99,7 @@
 	ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
-		clocks = <&dpll_per_m2_ck>;
+		clocks = <&l4ls_gclk>;
 		ti,bit-shift = <0>;
 		reg = <0x0664>;
 	};
@@ -107,7 +107,7 @@
 	ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
-		clocks = <&dpll_per_m2_ck>;
+		clocks = <&l4ls_gclk>;
 		ti,bit-shift = <1>;
 		reg = <0x0664>;
 	};
@@ -115,7 +115,7 @@
 	ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
-		clocks = <&dpll_per_m2_ck>;
+		clocks = <&l4ls_gclk>;
 		ti,bit-shift = <2>;
 		reg = <0x0664>;
 	};
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
index f9a17e2..0198f5a 100644
--- a/arch/arm/boot/dts/am437x-idk-evm.dts
+++ b/arch/arm/boot/dts/am437x-idk-evm.dts
@@ -133,20 +133,6 @@
 		>;
 	};
 
-	i2c1_pins_default: i2c1_pins_default {
-		pinctrl-single,pins = <
-			0x15c (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_cs0.i2c1_scl */
-			0x158 (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_d1.i2c1_sda */
-		>;
-	};
-
-	i2c1_pins_sleep: i2c1_pins_sleep {
-		pinctrl-single,pins = <
-			0x15c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_cs0.i2c1_scl */
-			0x158 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d1.i2c1_sda */
-		>;
-	};
-
 	mmc1_pins_default: pinmux_mmc1_pins_default {
 		pinctrl-single,pins = <
 			0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */
@@ -254,7 +240,7 @@
 	status = "okay";
 	pinctrl-names = "default", "sleep";
 	pinctrl-0 = <&i2c0_pins_default>;
-	pinctrl-1 = <&i2c0_pins_default>;
+	pinctrl-1 = <&i2c0_pins_sleep>;
 	clock-frequency = <400000>;
 
 	at24@50 {
@@ -262,17 +248,10 @@
 		pagesize = <64>;
 		reg = <0x50>;
 	};
-};
-
-&i2c1 {
-	status = "okay";
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&i2c1_pins_default>;
-	pinctrl-1 = <&i2c1_pins_default>;
-	clock-frequency = <400000>;
 
 	tps: tps62362@60 {
 		compatible = "ti,tps62362";
+		reg = <0x60>;
 		regulator-name = "VDD_MPU";
 		regulator-min-microvolt = <950000>;
 		regulator-max-microvolt = <1330000>;
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index c7dc9da..cfb4968 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -107,7 +107,7 @@
 	ehrpwm0_tbclk: ehrpwm0_tbclk {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
-		clocks = <&dpll_per_m2_ck>;
+		clocks = <&l4ls_gclk>;
 		ti,bit-shift = <0>;
 		reg = <0x0664>;
 	};
@@ -115,7 +115,7 @@
 	ehrpwm1_tbclk: ehrpwm1_tbclk {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
-		clocks = <&dpll_per_m2_ck>;
+		clocks = <&l4ls_gclk>;
 		ti,bit-shift = <1>;
 		reg = <0x0664>;
 	};
@@ -123,7 +123,7 @@
 	ehrpwm2_tbclk: ehrpwm2_tbclk {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
-		clocks = <&dpll_per_m2_ck>;
+		clocks = <&l4ls_gclk>;
 		ti,bit-shift = <2>;
 		reg = <0x0664>;
 	};
@@ -131,7 +131,7 @@
 	ehrpwm3_tbclk: ehrpwm3_tbclk {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
-		clocks = <&dpll_per_m2_ck>;
+		clocks = <&l4ls_gclk>;
 		ti,bit-shift = <4>;
 		reg = <0x0664>;
 	};
@@ -139,7 +139,7 @@
 	ehrpwm4_tbclk: ehrpwm4_tbclk {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
-		clocks = <&dpll_per_m2_ck>;
+		clocks = <&l4ls_gclk>;
 		ti,bit-shift = <5>;
 		reg = <0x0664>;
 	};
@@ -147,7 +147,7 @@
 	ehrpwm5_tbclk: ehrpwm5_tbclk {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
-		clocks = <&dpll_per_m2_ck>;
+		clocks = <&l4ls_gclk>;
 		ti,bit-shift = <6>;
 		reg = <0x0664>;
 	};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 03750af..6463f9e 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -549,14 +549,6 @@
 	pinctrl-0 = <&usb1_pins>;
 };
 
-&omap_dwc3_1 {
-	extcon = <&extcon_usb1>;
-};
-
-&omap_dwc3_2 {
-	extcon = <&extcon_usb2>;
-};
-
 &usb2 {
 	dr_mode = "peripheral";
 };
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index fff0ee6..e7f0a4a 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -494,12 +494,12 @@
 
 					pinctrl_usart3_rts: usart3_rts-0 {
 						atmel,pins =
-							<AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE>;	/* PC8 periph B */
+							<AT91_PIOC 8 AT91_PERIPH_B AT91_PINCTRL_NONE>;
 					};
 
 					pinctrl_usart3_cts: usart3_cts-0 {
 						atmel,pins =
-							<AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>;	/* PC10 periph B */
+							<AT91_PIOC 10 AT91_PERIPH_B AT91_PINCTRL_NONE>;
 					};
 				};
 
@@ -853,7 +853,7 @@
 			};
 
 			usb1: gadget@fffa4000 {
-				compatible = "atmel,at91rm9200-udc";
+				compatible = "atmel,at91sam9260-udc";
 				reg = <0xfffa4000 0x4000>;
 				interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>;
 				clocks = <&udc_clk>, <&udpck>;
@@ -976,7 +976,6 @@
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
-				atmel,idle-halt;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index e247b0b..d55fdf2 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -124,11 +124,12 @@
 			};
 
 			usb1: gadget@fffa4000 {
-				compatible = "atmel,at91rm9200-udc";
+				compatible = "atmel,at91sam9261-udc";
 				reg = <0xfffa4000 0x4000>;
 				interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>;
-				clocks = <&usb>, <&udc_clk>, <&udpck>;
-				clock-names = "usb_clk", "udc_clk", "udpck";
+				clocks = <&udc_clk>, <&udpck>;
+				clock-names = "pclk", "hclk";
+				atmel,matrix = <&matrix>;
 				status = "disabled";
 			};
 
@@ -262,7 +263,7 @@
 			};
 
 			matrix: matrix@ffffee00 {
-				compatible = "atmel,at91sam9260-bus-matrix";
+				compatible = "atmel,at91sam9260-bus-matrix", "syscon";
 				reg = <0xffffee00 0x200>;
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 1f67bb4..fce301c 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -69,7 +69,7 @@
 
 	sram1: sram@00500000 {
 		compatible = "mmio-sram";
-		reg = <0x00300000 0x4000>;
+		reg = <0x00500000 0x4000>;
 	};
 
 	ahb {
@@ -856,7 +856,7 @@
 			};
 
 			usb1: gadget@fff78000 {
-				compatible = "atmel,at91rm9200-udc";
+				compatible = "atmel,at91sam9263-udc";
 				reg = <0xfff78000 0x4000>;
 				interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>;
 				clocks = <&udc_clk>, <&udpck>;
@@ -905,7 +905,6 @@
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
-				atmel,idle-halt;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index ee80aa9..488af63 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -1116,7 +1116,6 @@
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
-				atmel,idle-halt;
 				status = "disabled";
 			};
 
@@ -1301,7 +1300,7 @@
 			compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
 			reg = <0x00800000 0x100000>;
 			interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-			clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
+			clocks = <&utmi>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
 			clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index c2666a7..0c53a375 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -894,7 +894,6 @@
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
-				atmel,idle-halt;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 818dabd..d221179 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -1066,7 +1066,7 @@
 				reg = <0x00500000 0x80000
 				       0xf803c000 0x400>;
 				interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&usb>, <&udphs_clk>;
+				clocks = <&utmi>, <&udphs_clk>;
 				clock-names = "hclk", "pclk";
 				status = "disabled";
 
@@ -1130,7 +1130,6 @@
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
-				atmel,idle-halt;
 				status = "disabled";
 			};
 
@@ -1186,7 +1185,7 @@
 			compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
 			reg = <0x00700000 0x100000>;
 			interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-			clocks = <&usb>, <&uhphs_clk>, <&uhpck>;
+			clocks = <&utmi>, <&uhphs_clk>, <&uhpck>;
 			clock-names = "usb_clk", "ehci_clk", "uhpck";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index 5126f9e..ff5fb6a 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -70,6 +70,26 @@
 		};
 	};
 
+	i2c0: i2c@18008000 {
+		compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
+		reg = <0x18008000 0x100>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+		clock-frequency = <100000>;
+		status = "disabled";
+	};
+
+	i2c1: i2c@1800b000 {
+		compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
+		reg = <0x1800b000 0x100>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+		clock-frequency = <100000>;
+		status = "disabled";
+	};
+
 	uart0: serial@18020000 {
 		compatible = "snps,dw-apb-uart";
 		reg = <0x18020000 0x100>;
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index d2d8e94..f46329c 100644
--- a/arch/arm/boot/dts/bcm63138.dtsi
+++ b/arch/arm/boot/dts/bcm63138.dtsi
@@ -66,8 +66,9 @@
 			reg = <0x1d000 0x1000>;
 			cache-unified;
 			cache-level = <2>;
-			cache-sets = <16>;
-			cache-size = <0x80000>;
+			cache-size = <524288>;
+			cache-sets = <1024>;
+			cache-line-size = <32>;
 			interrupts = <GIC_PPI 0 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index 857d028..d3a29c1 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -35,6 +35,18 @@
 			DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0)	/* SPI_D1 */
 		>;
 	};
+
+	usb0_pins: pinmux_usb0_pins {
+		pinctrl-single,pins = <
+			DM816X_IOPAD(0x0d00, MUX_MODE0)			/* USB0_DRVVBUS */
+		>;
+	};
+
+	usb1_pins: pinmux_usb0_pins {
+		pinctrl-single,pins = <
+			DM816X_IOPAD(0x0d04, MUX_MODE0)			/* USB1_DRVVBUS */
+		>;
+	};
 };
 
 &i2c1 {
@@ -127,3 +139,16 @@
 &mmc1 {
 	vmmc-supply = <&vmmcsd_fixed>;
 };
+
+/* At least dm8168-evm rev c won't support multipoint, later may */
+&usb0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_pins>;
+	mentor,multipoint = <0>;
+};
+
+&usb1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb1_pins>;
+	mentor,multipoint = <0>;
+};
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index d98d0f7..3c97b5f 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -97,10 +97,31 @@
 
 			/* Device Configuration Registers */
 			scm_conf: syscon@600 {
-				compatible = "syscon";
+				compatible = "syscon", "simple-bus";
 				reg = <0x600 0x110>;
 				#address-cells = <1>;
 				#size-cells = <1>;
+				ranges = <0 0x600 0x110>;
+
+				usb_phy0: usb-phy@20 {
+					compatible = "ti,dm8168-usb-phy";
+					reg = <0x20 0x8>;
+					reg-names = "phy";
+					clocks = <&main_fapll 6>;
+					clock-names = "refclk";
+					#phy-cells = <0>;
+					syscon = <&scm_conf>;
+				};
+
+				usb_phy1: usb-phy@28 {
+					compatible = "ti,dm8168-usb-phy";
+					reg = <0x28 0x8>;
+					reg-names = "phy";
+					clocks = <&main_fapll 6>;
+					clock-names = "refclk";
+					#phy-cells = <0>;
+					syscon = <&scm_conf>;
+				};
 			};
 
 			scrm_clocks: clocks {
@@ -357,7 +378,10 @@
 				reg-names = "mc", "control";
 				interrupts = <18>;
 				interrupt-names = "mc";
-				dr_mode = "otg";
+				dr_mode = "host";
+				interface-type = <0>;
+				phys = <&usb_phy0>;
+				phy-names = "usb2-phy";
 				mentor,multipoint = <1>;
 				mentor,num-eps = <16>;
 				mentor,ram-bits = <12>;
@@ -366,13 +390,15 @@
 
 			usb1: usb@47401800 {
 				compatible = "ti,musb-am33xx";
-				status = "disabled";
 				reg = <0x47401c00 0x400
 				       0x47401800 0x200>;
 				reg-names = "mc", "control";
 				interrupts = <19>;
 				interrupt-names = "mc";
-				dr_mode = "otg";
+				dr_mode = "host";
+				interface-type = <0>;
+				phys = <&usb_phy1>;
+				phy-names = "usb2-phy";
 				mentor,multipoint = <1>;
 				mentor,num-eps = <16>;
 				mentor,ram-bits = <12>;
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 746cddb..7563d7c 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -263,17 +263,15 @@
 
 	dcan1_pins_default: dcan1_pins_default {
 		pinctrl-single,pins = <
-			0x3d0   (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */
-			0x3d4   (MUX_MODE15)		/* dcan1_rx.off */
-			0x418   (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */
+			0x3d0   (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
+			0x418   (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */
 		>;
 	};
 
 	dcan1_pins_sleep: dcan1_pins_sleep {
 		pinctrl-single,pins = <
-			0x3d0   (MUX_MODE15)	/* dcan1_tx.off */
-			0x3d4   (MUX_MODE15)	/* dcan1_rx.off */
-			0x418   (MUX_MODE15)	/* wakeup0.off */
+			0x3d0   (MUX_MODE15 | PULL_UP)	/* dcan1_tx.off */
+			0x418   (MUX_MODE15 | PULL_UP)	/* wakeup0.off */
 		>;
 	};
 };
@@ -543,14 +541,6 @@
 	};
 };
 
-&omap_dwc3_1 {
-	extcon = <&extcon_usb1>;
-};
-
-&omap_dwc3_2 {
-	extcon = <&extcon_usb2>;
-};
-
 &usb1 {
 	dr_mode = "peripheral";
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 5827fed..127608d 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -249,8 +249,8 @@
 				     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
 			#dma-cells = <1>;
-			#dma-channels = <32>;
-			#dma-requests = <127>;
+			dma-channels = <32>;
+			dma-requests = <127>;
 		};
 
 		gpio1: gpio@4ae10000 {
@@ -1090,8 +1090,8 @@
 				      <0x4A096800 0x40>; /* pll_ctrl */
 				reg-names = "phy_rx", "phy_tx", "pll_ctrl";
 				ctrl-module = <&omap_control_sata>;
-				clocks = <&sys_clkin1>;
-				clock-names = "sysclk";
+				clocks = <&sys_clkin1>, <&sata_ref_clk>;
+				clock-names = "sysclk", "refclk";
 				#phy-cells = <0>;
 			};
 
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index 4d87117..40ed539 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -119,17 +119,15 @@
 
 	dcan1_pins_default: dcan1_pins_default {
 		pinctrl-single,pins = <
-			0x3d0   (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */
-			0x3d4   (MUX_MODE15)		/* dcan1_rx.off */
-			0x418   (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */
+			0x3d0   (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
+			0x418   (PULL_UP | MUX_MODE1)	/* wakeup0.dcan1_rx */
 		>;
 	};
 
 	dcan1_pins_sleep: dcan1_pins_sleep {
 		pinctrl-single,pins = <
-			0x3d0   (MUX_MODE15)	/* dcan1_tx.off */
-			0x3d4   (MUX_MODE15)	/* dcan1_rx.off */
-			0x418   (MUX_MODE15)	/* wakeup0.off */
+			0x3d0   (MUX_MODE15 | PULL_UP)	/* dcan1_tx.off */
+			0x418   (MUX_MODE15 | PULL_UP)	/* wakeup0.off */
 		>;
 	};
 
@@ -380,14 +378,6 @@
 	phy-supply = <&ldo4_reg>;
 };
 
-&omap_dwc3_1 {
-	extcon = <&extcon_usb1>;
-};
-
-&omap_dwc3_2 {
-	extcon = <&extcon_usb2>;
-};
-
 &usb1 {
 	dr_mode = "peripheral";
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 4bdcbd6..99b09a4 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -243,10 +243,18 @@
 		ti,invert-autoidle-bit;
 	};
 
+	dpll_core_byp_mux: dpll_core_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+		ti,bit-shift = <23>;
+		reg = <0x012c>;
+	};
+
 	dpll_core_ck: dpll_core_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-core-clock";
-		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+		clocks = <&sys_clkin1>, <&dpll_core_byp_mux>;
 		reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
 	};
 
@@ -309,10 +317,18 @@
 		clock-div = <1>;
 	};
 
+	dpll_dsp_byp_mux: dpll_dsp_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
+		ti,bit-shift = <23>;
+		reg = <0x0240>;
+	};
+
 	dpll_dsp_ck: dpll_dsp_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
-		clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
+		clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>;
 		reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>;
 	};
 
@@ -335,10 +351,18 @@
 		clock-div = <1>;
 	};
 
+	dpll_iva_byp_mux: dpll_iva_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
+		ti,bit-shift = <23>;
+		reg = <0x01ac>;
+	};
+
 	dpll_iva_ck: dpll_iva_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
-		clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
+		clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>;
 		reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
 	};
 
@@ -361,10 +385,18 @@
 		clock-div = <1>;
 	};
 
+	dpll_gpu_byp_mux: dpll_gpu_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+		ti,bit-shift = <23>;
+		reg = <0x02e4>;
+	};
+
 	dpll_gpu_ck: dpll_gpu_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
-		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+		clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>;
 		reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>;
 	};
 
@@ -398,10 +430,18 @@
 		clock-div = <1>;
 	};
 
+	dpll_ddr_byp_mux: dpll_ddr_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+		ti,bit-shift = <23>;
+		reg = <0x021c>;
+	};
+
 	dpll_ddr_ck: dpll_ddr_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
-		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+		clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>;
 		reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>;
 	};
 
@@ -416,10 +456,18 @@
 		ti,invert-autoidle-bit;
 	};
 
+	dpll_gmac_byp_mux: dpll_gmac_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+		ti,bit-shift = <23>;
+		reg = <0x02b4>;
+	};
+
 	dpll_gmac_ck: dpll_gmac_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
-		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
+		clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>;
 		reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>;
 	};
 
@@ -482,10 +530,18 @@
 		clock-div = <1>;
 	};
 
+	dpll_eve_byp_mux: dpll_eve_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
+		ti,bit-shift = <23>;
+		reg = <0x0290>;
+	};
+
 	dpll_eve_ck: dpll_eve_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
-		clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
+		clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>;
 		reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>;
 	};
 
@@ -1249,10 +1305,18 @@
 		clock-div = <1>;
 	};
 
+	dpll_per_byp_mux: dpll_per_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
+		ti,bit-shift = <23>;
+		reg = <0x014c>;
+	};
+
 	dpll_per_ck: dpll_per_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
-		clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
+		clocks = <&sys_clkin1>, <&dpll_per_byp_mux>;
 		reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
 	};
 
@@ -1275,10 +1339,18 @@
 		clock-div = <1>;
 	};
 
+	dpll_usb_byp_mux: dpll_usb_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
+		ti,bit-shift = <23>;
+		reg = <0x018c>;
+	};
+
 	dpll_usb_ck: dpll_usb_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-j-type-clock";
-		clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
+		clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>;
 		reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
 	};
 
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 277b48b..ac6b0ae 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -18,6 +18,7 @@
  */
 
 #include "skeleton.dtsi"
+#include "exynos4-cpu-thermal.dtsi"
 #include <dt-bindings/clock/exynos3250.h>
 
 / {
@@ -193,6 +194,7 @@
 			interrupts = <0 216 0>;
 			clocks = <&cmu CLK_TMU_APBIF>;
 			clock-names = "tmu_apbif";
+			#include "exynos4412-tmu-sensor-conf.dtsi"
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
new file mode 100644
index 0000000..735cb2f
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
@@ -0,0 +1,52 @@
+/*
+ * Device tree sources for Exynos4 thermal zone
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+thermal-zones {
+	cpu_thermal: cpu-thermal {
+		thermal-sensors = <&tmu 0>;
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		trips {
+			cpu_alert0: cpu-alert-0 {
+				temperature = <70000>; /* millicelsius */
+				hysteresis = <10000>; /* millicelsius */
+				type = "active";
+			};
+			cpu_alert1: cpu-alert-1 {
+				temperature = <95000>; /* millicelsius */
+				hysteresis = <10000>; /* millicelsius */
+				type = "active";
+			};
+			cpu_alert2: cpu-alert-2 {
+				temperature = <110000>; /* millicelsius */
+				hysteresis = <10000>; /* millicelsius */
+				type = "active";
+			};
+			cpu_crit0: cpu-crit-0 {
+				temperature = <120000>; /* millicelsius */
+				hysteresis = <0>; /* millicelsius */
+				type = "critical";
+			};
+		};
+		cooling-maps {
+			map0 {
+				trip = <&cpu_alert0>;
+			};
+			map1 {
+				trip = <&cpu_alert1>;
+			};
+		};
+	};
+};
+};
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 76173ca..77ea547 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -38,6 +38,7 @@
 		i2c5 = &i2c_5;
 		i2c6 = &i2c_6;
 		i2c7 = &i2c_7;
+		i2c8 = &i2c_8;
 		csis0 = &csis_0;
 		csis1 = &csis_1;
 		fimc0 = &fimc_0;
@@ -104,6 +105,7 @@
 		compatible = "samsung,exynos4210-pd";
 		reg = <0x10023C20 0x20>;
 		#power-domain-cells = <0>;
+		power-domains = <&pd_lcd0>;
 	};
 
 	pd_cam: cam-power-domain@10023C00 {
@@ -554,6 +556,22 @@
 		status = "disabled";
 	};
 
+	i2c_8: i2c@138E0000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "samsung,s3c2440-hdmiphy-i2c";
+		reg = <0x138E0000 0x100>;
+		interrupts = <0 93 0>;
+		clocks = <&clock CLK_I2C_HDMI>;
+		clock-names = "i2c";
+		status = "disabled";
+
+		hdmi_i2c_phy: hdmiphy@38 {
+			compatible = "exynos4210-hdmiphy";
+			reg = <0x38>;
+		};
+	};
+
 	spi_0: spi@13920000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x13920000 0x100>;
@@ -663,6 +681,33 @@
 		status = "disabled";
 	};
 
+	tmu: tmu@100C0000 {
+		#include "exynos4412-tmu-sensor-conf.dtsi"
+	};
+
+	hdmi: hdmi@12D00000 {
+		compatible = "samsung,exynos4210-hdmi";
+		reg = <0x12D00000 0x70000>;
+		interrupts = <0 92 0>;
+		clock-names = "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy",
+			"mout_hdmi";
+		clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
+			<&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
+			<&clock CLK_MOUT_HDMI>;
+		phy = <&hdmi_i2c_phy>;
+		power-domains = <&pd_tv>;
+		samsung,syscon-phandle = <&pmu_system_controller>;
+		status = "disabled";
+	};
+
+	mixer: mixer@12C10000 {
+		compatible = "samsung,exynos4210-mixer";
+		interrupts = <0 91 0>;
+		reg = <0x12C10000 0x2100>, <0x12c00000 0x300>;
+		power-domains = <&pd_tv>;
+		status = "disabled";
+	};
+
 	ppmu_dmc0: ppmu_dmc0@106a0000 {
 		compatible = "samsung,exynos-ppmu";
 		reg = <0x106a0000 0x2000>;
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 3d6652a..32c5fd8 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -426,6 +426,25 @@
 		status = "okay";
 	};
 
+	tmu@100C0000 {
+		status = "okay";
+	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			cooling-maps {
+				map0 {
+				     /* Corresponds to 800MHz at freq_table */
+				     cooling-device = <&cpu0 2 2>;
+				};
+				map1 {
+				     /* Corresponds to 200MHz at freq_table */
+				     cooling-device = <&cpu0 4 4>;
+			       };
+		       };
+		};
+	};
+
 	camera {
 		pinctrl-names = "default";
 		pinctrl-0 = <>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index b57e6b8..d4f2b11 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -505,6 +505,63 @@
 			assigned-clock-rates = <0>, <160000000>;
 		};
 	};
+
+	hdmi_en: voltage-regulator-hdmi-5v {
+		compatible = "regulator-fixed";
+		regulator-name = "HDMI_5V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpe0 1 0>;
+		enable-active-high;
+	};
+
+	hdmi_ddc: i2c-ddc {
+		compatible = "i2c-gpio";
+		gpios = <&gpe4 2 0 &gpe4 3 0>;
+		i2c-gpio,delay-us = <100>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		pinctrl-0 = <&i2c_ddc_bus>;
+		pinctrl-names = "default";
+		status = "okay";
+	};
+
+	mixer@12C10000 {
+		status = "okay";
+	};
+
+	hdmi@12D00000 {
+		hpd-gpio = <&gpx3 7 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&hdmi_hpd>;
+		hdmi-en-supply = <&hdmi_en>;
+		vdd-supply = <&ldo3_reg>;
+		vdd_osc-supply = <&ldo4_reg>;
+		vdd_pll-supply = <&ldo3_reg>;
+		ddc = <&hdmi_ddc>;
+		status = "okay";
+	};
+
+	i2c@138E0000 {
+		status = "okay";
+	};
+};
+
+&pinctrl_1 {
+	hdmi_hpd: hdmi-hpd {
+		samsung,pins = "gpx3-7";
+		samsung,pin-pud = <0>;
+	};
+};
+
+&pinctrl_0 {
+	i2c_ddc_bus: i2c-ddc-bus {
+		samsung,pins = "gpe4-2", "gpe4-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
 };
 
 &mdma1 {
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 67c832c..be89f83 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -21,6 +21,7 @@
 
 #include "exynos4.dtsi"
 #include "exynos4210-pinctrl.dtsi"
+#include "exynos4-cpu-thermal.dtsi"
 
 / {
 	compatible = "samsung,exynos4210", "samsung,exynos4";
@@ -35,10 +36,13 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		cpu@900 {
+		cpu0: cpu@900 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0x900>;
+			cooling-min-level = <4>;
+			cooling-max-level = <2>;
+			#cooling-cells = <2>; /* min followed by max */
 		};
 
 		cpu@901 {
@@ -153,16 +157,38 @@
 		reg = <0x03860000 0x1000>;
 	};
 
-	tmu@100C0000 {
+	tmu: tmu@100C0000 {
 		compatible = "samsung,exynos4210-tmu";
 		interrupt-parent = <&combiner>;
 		reg = <0x100C0000 0x100>;
 		interrupts = <2 4>;
 		clocks = <&clock CLK_TMU_APBIF>;
 		clock-names = "tmu_apbif";
+		samsung,tmu_gain = <15>;
+		samsung,tmu_reference_voltage = <7>;
 		status = "disabled";
 	};
 
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tmu 0>;
+
+			trips {
+			      cpu_alert0: cpu-alert-0 {
+				      temperature = <85000>; /* millicelsius */
+			      };
+			      cpu_alert1: cpu-alert-1 {
+				      temperature = <100000>; /* millicelsius */
+			      };
+			      cpu_alert2: cpu-alert-2 {
+				      temperature = <110000>; /* millicelsius */
+			      };
+			};
+		};
+	};
+
 	g2d@12800000 {
 		compatible = "samsung,s5pv210-g2d";
 		reg = <0x12800000 0x1000>;
@@ -203,6 +229,14 @@
 		};
 	};
 
+	mixer: mixer@12C10000 {
+		clock-names = "mixer", "hdmi", "sclk_hdmi", "vp", "mout_mixer",
+			"sclk_mixer";
+		clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+			<&clock CLK_SCLK_HDMI>, <&clock CLK_VP>,
+			<&clock CLK_MOUT_MIXER>, <&clock CLK_SCLK_MIXER>;
+	};
+
 	ppmu_lcd1: ppmu_lcd1@12240000 {
 		compatible = "samsung,exynos-ppmu";
 		reg = <0x12240000 0x2000>;
diff --git a/arch/arm/boot/dts/exynos4212.dtsi b/arch/arm/boot/dts/exynos4212.dtsi
index dd0a43e..5be0328 100644
--- a/arch/arm/boot/dts/exynos4212.dtsi
+++ b/arch/arm/boot/dts/exynos4212.dtsi
@@ -26,10 +26,13 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		cpu@A00 {
+		cpu0: cpu@A00 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0xA00>;
+			cooling-min-level = <13>;
+			cooling-max-level = <7>;
+			#cooling-cells = <2>; /* min followed by max */
 		};
 
 		cpu@A01 {
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index de80b5b..adb4f6a 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -249,6 +249,20 @@
 					regulator-always-on;
 				};
 
+				ldo8_reg: ldo@8 {
+					regulator-compatible = "LDO8";
+					regulator-name = "VDD10_HDMI_1.0V";
+					regulator-min-microvolt = <1000000>;
+					regulator-max-microvolt = <1000000>;
+				};
+
+				ldo10_reg: ldo@10 {
+					regulator-compatible = "LDO10";
+					regulator-name = "VDDQ_MIPIHSI_1.8V";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+				};
+
 				ldo11_reg: LDO11 {
 					regulator-name = "VDD18_ABB1_1.8V";
 					regulator-min-microvolt = <1800000>;
@@ -411,6 +425,51 @@
 	ehci: ehci@12580000 {
 		status = "okay";
 	};
+
+	tmu@100C0000 {
+		vtmu-supply = <&ldo10_reg>;
+		status = "okay";
+	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			cooling-maps {
+				map0 {
+				     /* Corresponds to 800MHz at freq_table */
+				     cooling-device = <&cpu0 7 7>;
+				};
+				map1 {
+				     /* Corresponds to 200MHz at freq_table */
+				     cooling-device = <&cpu0 13 13>;
+			       };
+		       };
+		};
+	};
+
+	mixer: mixer@12C10000 {
+		status = "okay";
+	};
+
+	hdmi@12D00000 {
+		hpd-gpio = <&gpx3 7 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&hdmi_hpd>;
+		vdd-supply = <&ldo8_reg>;
+		vdd_osc-supply = <&ldo10_reg>;
+		vdd_pll-supply = <&ldo8_reg>;
+		ddc = <&hdmi_ddc>;
+		status = "okay";
+	};
+
+	hdmi_ddc: i2c@13880000 {
+		status = "okay";
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c2_bus>;
+	};
+
+	i2c@138E0000 {
+		status = "okay";
+	};
 };
 
 &pinctrl_1 {
@@ -425,4 +484,9 @@
 		samsung,pin-pud = <0>;
 		samsung,pin-drv = <0>;
 	};
+
+	hdmi_hpd: hdmi-hpd {
+		samsung,pins = "gpx3-7";
+		samsung,pin-pud = <1>;
+	};
 };
diff --git a/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi
new file mode 100644
index 0000000..e3f7934
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi
@@ -0,0 +1,24 @@
+/*
+ * Device tree sources for Exynos4412 TMU sensor configuration
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <8>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <55>;
+samsung,tmu_min_efuse_value = <40>;
+samsung,tmu_max_efuse_value = <100>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <85>;
+samsung,tmu_default_temp_offset = <50>;
+samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 21f7480..173ffa4 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -927,6 +927,21 @@
 		pulldown-ohm = <100000>; /* 100K */
 		io-channels = <&adc 2>;  /* Battery temperature */
 	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			cooling-maps {
+				map0 {
+				     /* Corresponds to 800MHz at freq_table */
+				     cooling-device = <&cpu0 7 7>;
+				};
+				map1 {
+				     /* Corresponds to 200MHz at freq_table */
+				     cooling-device = <&cpu0 13 13>;
+			       };
+		       };
+		};
+	};
 };
 
 &pmu_system_controller {
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
index 0f6ec93..68ad43b 100644
--- a/arch/arm/boot/dts/exynos4412.dtsi
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -26,10 +26,13 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		cpu@A00 {
+		cpu0: cpu@A00 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0xA00>;
+			cooling-min-level = <13>;
+			cooling-max-level = <7>;
+			#cooling-cells = <2>; /* min followed by max */
 		};
 
 		cpu@A01 {
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index f5e0ae7..6a6abe1 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -19,6 +19,7 @@
 
 #include "exynos4.dtsi"
 #include "exynos4x12-pinctrl.dtsi"
+#include "exynos4-cpu-thermal.dtsi"
 
 / {
 	aliases {
@@ -297,4 +298,15 @@
 		clock-names = "tmu_apbif";
 		status = "disabled";
 	};
+
+	hdmi: hdmi@12D00000 {
+		compatible = "samsung,exynos4212-hdmi";
+	};
+
+	mixer: mixer@12C10000 {
+		compatible = "samsung,exynos4212-mixer";
+		clock-names = "mixer", "hdmi", "sclk_hdmi", "vp";
+		clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+			 <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>;
+	};
 };
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 9bb1b0b..adbde1a 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -20,7 +20,7 @@
 #include <dt-bindings/clock/exynos5250.h>
 #include "exynos5.dtsi"
 #include "exynos5250-pinctrl.dtsi"
-
+#include "exynos4-cpu-thermal.dtsi"
 #include <dt-bindings/clock/exynos-audss-clk.h>
 
 / {
@@ -58,11 +58,14 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		cpu@0 {
+		cpu0: cpu@0 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a15";
 			reg = <0>;
 			clock-frequency = <1700000000>;
+			cooling-min-level = <15>;
+			cooling-max-level = <9>;
+			#cooling-cells = <2>; /* min followed by max */
 		};
 		cpu@1 {
 			device_type = "cpu";
@@ -102,6 +105,12 @@
 		#power-domain-cells = <0>;
 	};
 
+	pd_disp1: disp1-power-domain@100440A0 {
+		compatible = "samsung,exynos4210-pd";
+		reg = <0x100440A0 0x20>;
+		#power-domain-cells = <0>;
+	};
+
 	clock: clock-controller@10010000 {
 		compatible = "samsung,exynos5250-clock";
 		reg = <0x10010000 0x30000>;
@@ -235,12 +244,32 @@
 		status = "disabled";
 	};
 
-	tmu@10060000 {
+	tmu: tmu@10060000 {
 		compatible = "samsung,exynos5250-tmu";
 		reg = <0x10060000 0x100>;
 		interrupts = <0 65 0>;
 		clocks = <&clock CLK_TMU>;
 		clock-names = "tmu_apbif";
+		#include "exynos4412-tmu-sensor-conf.dtsi"
+	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-sensors = <&tmu 0>;
+
+			cooling-maps {
+				map0 {
+				     /* Corresponds to 800MHz at freq_table */
+				     cooling-device = <&cpu0 9 9>;
+				};
+				map1 {
+				     /* Corresponds to 200MHz at freq_table */
+				     cooling-device = <&cpu0 15 15>;
+			       };
+		       };
+		};
 	};
 
 	serial@12C00000 {
@@ -719,6 +748,7 @@
 	hdmi: hdmi {
 		compatible = "samsung,exynos4212-hdmi";
 		reg = <0x14530000 0x70000>;
+		power-domains = <&pd_disp1>;
 		interrupts = <0 95 0>;
 		clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
 			 <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
@@ -731,9 +761,11 @@
 	mixer {
 		compatible = "samsung,exynos5250-mixer";
 		reg = <0x14450000 0x10000>;
+		power-domains = <&pd_disp1>;
 		interrupts = <0 94 0>;
-		clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>;
-		clock-names = "mixer", "sclk_hdmi";
+		clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+			 <&clock CLK_SCLK_HDMI>;
+		clock-names = "mixer", "hdmi", "sclk_hdmi";
 	};
 
 	dp_phy: video-phy@10040720 {
@@ -743,6 +775,7 @@
 	};
 
 	dp: dp-controller@145B0000 {
+		power-domains = <&pd_disp1>;
 		clocks = <&clock CLK_DP>;
 		clock-names = "dp";
 		phys = <&dp_phy>;
@@ -750,6 +783,7 @@
 	};
 
 	fimd: fimd@14400000 {
+		power-domains = <&pd_disp1>;
 		clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>;
 		clock-names = "sclk_fimd", "fimd";
 	};
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
new file mode 100644
index 0000000..5d31fc1
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
@@ -0,0 +1,35 @@
+/*
+ * Device tree sources for default Exynos5420 thermal zone definition
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+polling-delay-passive = <0>;
+polling-delay = <0>;
+trips {
+	cpu-alert-0 {
+		temperature = <85000>; /* millicelsius */
+		hysteresis = <10000>; /* millicelsius */
+		type = "active";
+	};
+	cpu-alert-1 {
+		temperature = <103000>; /* millicelsius */
+		hysteresis = <10000>; /* millicelsius */
+		type = "active";
+	};
+	cpu-alert-2 {
+		temperature = <110000>; /* millicelsius */
+		hysteresis = <10000>; /* millicelsius */
+		type = "active";
+	};
+	cpu-crit-0 {
+		temperature = <1200000>; /* millicelsius */
+		hysteresis = <0>; /* millicelsius */
+		type = "critical";
+	};
+};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 9dc2e97..c0e98cf 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -740,8 +740,9 @@
 		compatible = "samsung,exynos5420-mixer";
 		reg = <0x14450000 0x10000>;
 		interrupts = <0 94 0>;
-		clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>;
-		clock-names = "mixer", "sclk_hdmi";
+		clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+			 <&clock CLK_SCLK_HDMI>;
+		clock-names = "mixer", "hdmi", "sclk_hdmi";
 		power-domains = <&disp_pd>;
 	};
 
@@ -782,6 +783,7 @@
 		interrupts = <0 65 0>;
 		clocks = <&clock CLK_TMU>;
 		clock-names = "tmu_apbif";
+		#include "exynos4412-tmu-sensor-conf.dtsi"
 	};
 
 	tmu_cpu1: tmu@10064000 {
@@ -790,6 +792,7 @@
 		interrupts = <0 183 0>;
 		clocks = <&clock CLK_TMU>;
 		clock-names = "tmu_apbif";
+		#include "exynos4412-tmu-sensor-conf.dtsi"
 	};
 
 	tmu_cpu2: tmu@10068000 {
@@ -798,6 +801,7 @@
 		interrupts = <0 184 0>;
 		clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
 		clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+		#include "exynos4412-tmu-sensor-conf.dtsi"
 	};
 
 	tmu_cpu3: tmu@1006c000 {
@@ -806,6 +810,7 @@
 		interrupts = <0 185 0>;
 		clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
 		clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+		#include "exynos4412-tmu-sensor-conf.dtsi"
 	};
 
 	tmu_gpu: tmu@100a0000 {
@@ -814,6 +819,30 @@
 		interrupts = <0 215 0>;
 		clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
 		clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+		#include "exynos4412-tmu-sensor-conf.dtsi"
+	};
+
+	thermal-zones {
+		cpu0_thermal: cpu0-thermal {
+			thermal-sensors = <&tmu_cpu0>;
+			#include "exynos5420-trip-points.dtsi"
+		};
+		cpu1_thermal: cpu1-thermal {
+		       thermal-sensors = <&tmu_cpu1>;
+		       #include "exynos5420-trip-points.dtsi"
+		};
+		cpu2_thermal: cpu2-thermal {
+		       thermal-sensors = <&tmu_cpu2>;
+		       #include "exynos5420-trip-points.dtsi"
+		};
+		cpu3_thermal: cpu3-thermal {
+		       thermal-sensors = <&tmu_cpu3>;
+		       #include "exynos5420-trip-points.dtsi"
+		};
+		gpu_thermal: gpu-thermal {
+		       thermal-sensors = <&tmu_gpu>;
+		       #include "exynos5420-trip-points.dtsi"
+		};
 	};
 
         watchdog: watchdog@101D0000 {
diff --git a/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi
new file mode 100644
index 0000000..7b2fba0
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi
@@ -0,0 +1,24 @@
+/*
+ * Device tree sources for Exynos5440 TMU sensor configuration
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <5>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <0x5d2d>;
+samsung,tmu_min_efuse_value = <16>;
+samsung,tmu_max_efuse_value = <76>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <70>;
+samsung,tmu_default_temp_offset = <25>;
+samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
new file mode 100644
index 0000000..48adfa8
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Device tree sources for default Exynos5440 thermal zone definition
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+polling-delay-passive = <0>;
+polling-delay = <0>;
+trips {
+	cpu-alert-0 {
+		temperature = <100000>; /* millicelsius */
+		hysteresis = <0>; /* millicelsius */
+		type = "active";
+	};
+	cpu-crit-0 {
+		temperature = <1050000>; /* millicelsius */
+		hysteresis = <0>; /* millicelsius */
+		type = "critical";
+	};
+};
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 8f3373c..59d9416 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -219,6 +219,7 @@
 		interrupts = <0 58 0>;
 		clocks = <&clock CLK_B_125>;
 		clock-names = "tmu_apbif";
+		#include "exynos5440-tmu-sensor-conf.dtsi"
 	};
 
 	tmuctrl_1: tmuctrl@16011C {
@@ -227,6 +228,7 @@
 		interrupts = <0 58 0>;
 		clocks = <&clock CLK_B_125>;
 		clock-names = "tmu_apbif";
+		#include "exynos5440-tmu-sensor-conf.dtsi"
 	};
 
 	tmuctrl_2: tmuctrl@160120 {
@@ -235,6 +237,22 @@
 		interrupts = <0 58 0>;
 		clocks = <&clock CLK_B_125>;
 		clock-names = "tmu_apbif";
+		#include "exynos5440-tmu-sensor-conf.dtsi"
+	};
+
+	thermal-zones {
+		cpu0_thermal: cpu0-thermal {
+			thermal-sensors = <&tmuctrl_0>;
+			#include "exynos5440-trip-points.dtsi"
+		};
+		cpu1_thermal: cpu1-thermal {
+		       thermal-sensors = <&tmuctrl_1>;
+		       #include "exynos5440-trip-points.dtsi"
+		};
+		cpu2_thermal: cpu2-thermal {
+		       thermal-sensors = <&tmuctrl_2>;
+		       #include "exynos5440-trip-points.dtsi"
+		};
 	};
 
 	sata@210000 {
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index f1cd214..a626e6d 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -35,6 +35,7 @@
 			regulator-max-microvolt = <5000000>;
 			gpio = <&gpio3 22 0>;
 			enable-active-high;
+			vin-supply = <&swbst_reg>;
 		};
 
 		reg_usb_h1_vbus: regulator@1 {
@@ -45,6 +46,7 @@
 			regulator-max-microvolt = <5000000>;
 			gpio = <&gpio1 29 0>;
 			enable-active-high;
+			vin-supply = <&swbst_reg>;
 		};
 
 		reg_audio: regulator@2 {
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index fda4932..945887d 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -52,6 +52,7 @@
 			regulator-max-microvolt = <5000000>;
 			gpio = <&gpio4 0 0>;
 			enable-active-high;
+			vin-supply = <&swbst_reg>;
 		};
 
 		reg_usb_otg2_vbus: regulator@1 {
@@ -62,6 +63,7 @@
 			regulator-max-microvolt = <5000000>;
 			gpio = <&gpio4 2 0>;
 			enable-active-high;
+			vin-supply = <&swbst_reg>;
 		};
 
 		reg_aud3v: regulator@2 {
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index 59d1c29..578fa2a 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -87,8 +87,8 @@
 				     <14>,
 				     <15>;
 			#dma-cells = <1>;
-			#dma-channels = <32>;
-			#dma-requests = <64>;
+			dma-channels = <32>;
+			dma-requests = <64>;
 		};
 
 		i2c1: i2c@48070000 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 6040327..db80f9d 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -16,6 +16,13 @@
 	model = "Nokia N900";
 	compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3";
 
+	aliases {
+		i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+	};
+
 	cpus {
 		cpu@0 {
 			cpu0-supply = <&vcc>;
@@ -704,7 +711,7 @@
 		compatible = "smsc,lan91c94";
 		interrupt-parent = <&gpio2>;
 		interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;	/* gpio54 */
-		reg = <1 0x300 0xf>;		/* 16 byte IO range at offset 0x300 */
+		reg = <1 0 0xf>;		/* 16 byte IO range */
 		bank-width = <2>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&ethernet_pins>;
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 01b7111..f4f78c4 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -155,8 +155,8 @@
 				     <14>,
 				     <15>;
 			#dma-cells = <1>;
-			#dma-channels = <32>;
-			#dma-requests = <96>;
+			dma-channels = <32>;
+			dma-requests = <96>;
 		};
 
 		omap3_pmx_core: pinmux@48002030 {
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 074147c..87401d9 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -223,8 +223,8 @@
 				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
 			#dma-cells = <1>;
-			#dma-channels = <32>;
-			#dma-requests = <127>;
+			dma-channels = <32>;
+			dma-requests = <127>;
 		};
 
 		gpio1: gpio@4a310000 {
diff --git a/arch/arm/boot/dts/omap5-core-thermal.dtsi b/arch/arm/boot/dts/omap5-core-thermal.dtsi
index 19212ac..de8a3d4 100644
--- a/arch/arm/boot/dts/omap5-core-thermal.dtsi
+++ b/arch/arm/boot/dts/omap5-core-thermal.dtsi
@@ -13,7 +13,7 @@
 
 core_thermal: core_thermal {
 	polling-delay-passive = <250>; /* milliseconds */
-	polling-delay = <1000>; /* milliseconds */
+	polling-delay = <500>; /* milliseconds */
 
 			/* sensor       ID */
 	thermal-sensors = <&bandgap     2>;
diff --git a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi
index 1b87aca..bc3090f 100644
--- a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi
+++ b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi
@@ -13,7 +13,7 @@
 
 gpu_thermal: gpu_thermal {
 	polling-delay-passive = <250>; /* milliseconds */
-	polling-delay = <1000>; /* milliseconds */
+	polling-delay = <500>; /* milliseconds */
 
 			/* sensor       ID */
 	thermal-sensors = <&bandgap     1>;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index b321fdf..4a485b6 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -238,8 +238,8 @@
 				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
 			#dma-cells = <1>;
-			#dma-channels = <32>;
-			#dma-requests = <127>;
+			dma-channels = <32>;
+			dma-requests = <127>;
 		};
 
 		gpio1: gpio@4ae10000 {
@@ -929,8 +929,8 @@
 				      <0x4A096800 0x40>; /* pll_ctrl */
 				reg-names = "phy_rx", "phy_tx", "pll_ctrl";
 				ctrl-module = <&omap_control_sata>;
-				clocks = <&sys_clkin>;
-				clock-names = "sysclk";
+				clocks = <&sys_clkin>, <&sata_ref_clk>;
+				clock-names = "sysclk", "refclk";
 				#phy-cells = <0>;
 			};
 		};
@@ -1079,4 +1079,8 @@
 	};
 };
 
+&cpu_thermal {
+	polling-delay = <500>; /* milliseconds */
+};
+
 /include/ "omap54xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi
index 58c2746..83b425f 100644
--- a/arch/arm/boot/dts/omap54xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi
@@ -167,10 +167,18 @@
 		ti,index-starts-at-one;
 	};
 
+	dpll_core_byp_mux: dpll_core_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>;
+		ti,bit-shift = <23>;
+		reg = <0x012c>;
+	};
+
 	dpll_core_ck: dpll_core_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-core-clock";
-		clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>;
+		clocks = <&sys_clkin>, <&dpll_core_byp_mux>;
 		reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
 	};
 
@@ -294,10 +302,18 @@
 		clock-div = <1>;
 	};
 
+	dpll_iva_byp_mux: dpll_iva_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>;
+		ti,bit-shift = <23>;
+		reg = <0x01ac>;
+	};
+
 	dpll_iva_ck: dpll_iva_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
-		clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>;
+		clocks = <&sys_clkin>, <&dpll_iva_byp_mux>;
 		reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
 	};
 
@@ -599,10 +615,19 @@
 	};
 };
 &cm_core_clocks {
+
+	dpll_per_byp_mux: dpll_per_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>;
+		ti,bit-shift = <23>;
+		reg = <0x014c>;
+	};
+
 	dpll_per_ck: dpll_per_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
-		clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>;
+		clocks = <&sys_clkin>, <&dpll_per_byp_mux>;
 		reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
 	};
 
@@ -714,10 +739,18 @@
 		ti,index-starts-at-one;
 	};
 
+	dpll_usb_byp_mux: dpll_usb_byp_mux {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>;
+		ti,bit-shift = <23>;
+		reg = <0x018c>;
+	};
+
 	dpll_usb_ck: dpll_usb_ck {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-j-type-clock";
-		clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>;
+		clocks = <&sys_clkin>, <&dpll_usb_byp_mux>;
 		reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
 	};
 
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 261311b..367af53 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1248,7 +1248,6 @@
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
-				atmel,idle-halt;
 				status = "disabled";
 			};
 
@@ -1416,7 +1415,7 @@
 			compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
 			reg = <0x00700000 0x100000>;
 			interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
-			clocks = <&usb>, <&uhphs_clk>, <&uhpck>;
+			clocks = <&utmi>, <&uhphs_clk>, <&uhpck>;
 			clock-names = "usb_clk", "ehci_clk", "uhpck";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index d986b41..4303874 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -66,6 +66,7 @@
 		gpio4 = &pioE;
 		tcb0 = &tcb0;
 		tcb1 = &tcb1;
+		i2c0 = &i2c0;
 		i2c2 = &i2c2;
 	};
 	cpus {
@@ -259,7 +260,7 @@
 			compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
 			reg = <0x00600000 0x100000>;
 			interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>;
-			clocks = <&usb>, <&uhphs_clk>, <&uhpck>;
+			clocks = <&utmi>, <&uhphs_clk>, <&uhpck>;
 			clock-names = "usb_clk", "ehci_clk", "uhpck";
 			status = "disabled";
 		};
@@ -461,8 +462,8 @@
 
 					lcdck: lcdck {
 						#clock-cells = <0>;
-						reg = <4>;
-						clocks = <&smd>;
+						reg = <3>;
+						clocks = <&mck>;
 					};
 
 					smdck: smdck {
@@ -770,7 +771,7 @@
 						reg = <50>;
 					};
 
-					lcd_clk: lcd_clk {
+					lcdc_clk: lcdc_clk {
 						#clock-cells = <0>;
 						reg = <51>;
 					};
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 252c3d1..9d87609 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -713,6 +713,9 @@
 			reg-shift = <2>;
 			reg-io-width = <4>;
 			clocks = <&l4_sp_clk>;
+			dmas = <&pdma 28>,
+			       <&pdma 29>;
+			dma-names = "tx", "rx";
 		};
 
 		uart1: serial1@ffc03000 {
@@ -722,6 +725,9 @@
 			reg-shift = <2>;
 			reg-io-width = <4>;
 			clocks = <&l4_sp_clk>;
+			dmas = <&pdma 30>,
+			       <&pdma 31>;
+			dma-names = "tx", "rx";
 		};
 
 		rst: rstmgr@ffd05000 {
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 8ca3c1a..5c29258 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -294,35 +294,43 @@
 		};
 
 		mmc0_clk: clk@01c20088 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20088 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc0";
+			clock-output-names = "mmc0",
+					     "mmc0_output",
+					     "mmc0_sample";
 		};
 
 		mmc1_clk: clk@01c2008c {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c2008c 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc1";
+			clock-output-names = "mmc1",
+					     "mmc1_output",
+					     "mmc1_sample";
 		};
 
 		mmc2_clk: clk@01c20090 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20090 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc2";
+			clock-output-names = "mmc2",
+					     "mmc2_output",
+					     "mmc2_sample";
 		};
 
 		mmc3_clk: clk@01c20094 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20094 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc3";
+			clock-output-names = "mmc3",
+					     "mmc3_output",
+					     "mmc3_sample";
 		};
 
 		ts_clk: clk@01c20098 {
@@ -468,8 +476,14 @@
 		mmc0: mmc@01c0f000 {
 			compatible = "allwinner,sun4i-a10-mmc";
 			reg = <0x01c0f000 0x1000>;
-			clocks = <&ahb_gates 8>, <&mmc0_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 8>,
+				 <&mmc0_clk 0>,
+				 <&mmc0_clk 1>,
+				 <&mmc0_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <32>;
 			status = "disabled";
 		};
@@ -477,8 +491,14 @@
 		mmc1: mmc@01c10000 {
 			compatible = "allwinner,sun4i-a10-mmc";
 			reg = <0x01c10000 0x1000>;
-			clocks = <&ahb_gates 9>, <&mmc1_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 9>,
+				 <&mmc1_clk 0>,
+				 <&mmc1_clk 1>,
+				 <&mmc1_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <33>;
 			status = "disabled";
 		};
@@ -486,8 +506,14 @@
 		mmc2: mmc@01c11000 {
 			compatible = "allwinner,sun4i-a10-mmc";
 			reg = <0x01c11000 0x1000>;
-			clocks = <&ahb_gates 10>, <&mmc2_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 10>,
+				 <&mmc2_clk 0>,
+				 <&mmc2_clk 1>,
+				 <&mmc2_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <34>;
 			status = "disabled";
 		};
@@ -495,8 +521,14 @@
 		mmc3: mmc@01c12000 {
 			compatible = "allwinner,sun4i-a10-mmc";
 			reg = <0x01c12000 0x1000>;
-			clocks = <&ahb_gates 11>, <&mmc3_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 11>,
+				 <&mmc3_clk 0>,
+				 <&mmc3_clk 1>,
+				 <&mmc3_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <35>;
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 905f84d..2fd8988 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -218,27 +218,33 @@
 		};
 
 		mmc0_clk: clk@01c20088 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20088 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc0";
+			clock-output-names = "mmc0",
+					     "mmc0_output",
+					     "mmc0_sample";
 		};
 
 		mmc1_clk: clk@01c2008c {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c2008c 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc1";
+			clock-output-names = "mmc1",
+					     "mmc1_output",
+					     "mmc1_sample";
 		};
 
 		mmc2_clk: clk@01c20090 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20090 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc2";
+			clock-output-names = "mmc2",
+					     "mmc2_output",
+					     "mmc2_sample";
 		};
 
 		ts_clk: clk@01c20098 {
@@ -368,8 +374,14 @@
 		mmc0: mmc@01c0f000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c0f000 0x1000>;
-			clocks = <&ahb_gates 8>, <&mmc0_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 8>,
+				 <&mmc0_clk 0>,
+				 <&mmc0_clk 1>,
+				 <&mmc0_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <32>;
 			status = "disabled";
 		};
@@ -377,8 +389,14 @@
 		mmc1: mmc@01c10000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c10000 0x1000>;
-			clocks = <&ahb_gates 9>, <&mmc1_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 9>,
+				 <&mmc1_clk 0>,
+				 <&mmc1_clk 1>,
+				 <&mmc1_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <33>;
 			status = "disabled";
 		};
@@ -386,8 +404,14 @@
 		mmc2: mmc@01c11000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c11000 0x1000>;
-			clocks = <&ahb_gates 10>, <&mmc2_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 10>,
+				 <&mmc2_clk 0>,
+				 <&mmc2_clk 1>,
+				 <&mmc2_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <34>;
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 4910393..f8818f1 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -257,27 +257,33 @@
 		};
 
 		mmc0_clk: clk@01c20088 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20088 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc0";
+			clock-output-names = "mmc0",
+					     "mmc0_output",
+					     "mmc0_sample";
 		};
 
 		mmc1_clk: clk@01c2008c {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c2008c 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc1";
+			clock-output-names = "mmc1",
+					     "mmc1_output",
+					     "mmc1_sample";
 		};
 
 		mmc2_clk: clk@01c20090 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20090 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc2";
+			clock-output-names = "mmc2",
+					     "mmc2_output",
+					     "mmc2_sample";
 		};
 
 		ts_clk: clk@01c20098 {
@@ -391,8 +397,14 @@
 		mmc0: mmc@01c0f000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c0f000 0x1000>;
-			clocks = <&ahb_gates 8>, <&mmc0_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 8>,
+				 <&mmc0_clk 0>,
+				 <&mmc0_clk 1>,
+				 <&mmc0_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <32>;
 			status = "disabled";
 		};
@@ -400,8 +412,14 @@
 		mmc2: mmc@01c11000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c11000 0x1000>;
-			clocks = <&ahb_gates 10>, <&mmc2_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 10>,
+				 <&mmc2_clk 0>,
+				 <&mmc2_clk 1>,
+				 <&mmc2_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <34>;
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 47e5576..fa2f403 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -190,19 +190,11 @@
 			clock-output-names = "axi";
 		};
 
-		ahb1_mux: ahb1_mux@01c20054 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
-			reg = <0x01c20054 0x4>;
-			clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
-			clock-output-names = "ahb1_mux";
-		};
-
 		ahb1: ahb1@01c20054 {
 			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-ahb-clk";
+			compatible = "allwinner,sun6i-a31-ahb1-clk";
 			reg = <0x01c20054 0x4>;
-			clocks = <&ahb1_mux>;
+			clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
 			clock-output-names = "ahb1";
 		};
 
@@ -265,35 +257,43 @@
 		};
 
 		mmc0_clk: clk@01c20088 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20088 0x4>;
 			clocks = <&osc24M>, <&pll6 0>;
-			clock-output-names = "mmc0";
+			clock-output-names = "mmc0",
+					     "mmc0_output",
+					     "mmc0_sample";
 		};
 
 		mmc1_clk: clk@01c2008c {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c2008c 0x4>;
 			clocks = <&osc24M>, <&pll6 0>;
-			clock-output-names = "mmc1";
+			clock-output-names = "mmc1",
+					     "mmc1_output",
+					     "mmc1_sample";
 		};
 
 		mmc2_clk: clk@01c20090 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20090 0x4>;
 			clocks = <&osc24M>, <&pll6 0>;
-			clock-output-names = "mmc2";
+			clock-output-names = "mmc2",
+					     "mmc2_output",
+					     "mmc2_sample";
 		};
 
 		mmc3_clk: clk@01c20094 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20094 0x4>;
 			clocks = <&osc24M>, <&pll6 0>;
-			clock-output-names = "mmc3";
+			clock-output-names = "mmc3",
+					     "mmc3_output",
+					     "mmc3_sample";
 		};
 
 		spi0_clk: clk@01c200a0 {
@@ -383,15 +383,21 @@
 			#dma-cells = <1>;
 
 			/* DMA controller requires AHB1 clocked from PLL6 */
-			assigned-clocks = <&ahb1_mux>;
+			assigned-clocks = <&ahb1>;
 			assigned-clock-parents = <&pll6 0>;
 		};
 
 		mmc0: mmc@01c0f000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c0f000 0x1000>;
-			clocks = <&ahb1_gates 8>, <&mmc0_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb1_gates 8>,
+				 <&mmc0_clk 0>,
+				 <&mmc0_clk 1>,
+				 <&mmc0_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			resets = <&ahb1_rst 8>;
 			reset-names = "ahb";
 			interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
@@ -401,8 +407,14 @@
 		mmc1: mmc@01c10000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c10000 0x1000>;
-			clocks = <&ahb1_gates 9>, <&mmc1_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb1_gates 9>,
+				 <&mmc1_clk 0>,
+				 <&mmc1_clk 1>,
+				 <&mmc1_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			resets = <&ahb1_rst 9>;
 			reset-names = "ahb";
 			interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
@@ -412,8 +424,14 @@
 		mmc2: mmc@01c11000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c11000 0x1000>;
-			clocks = <&ahb1_gates 10>, <&mmc2_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb1_gates 10>,
+				 <&mmc2_clk 0>,
+				 <&mmc2_clk 1>,
+				 <&mmc2_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			resets = <&ahb1_rst 10>;
 			reset-names = "ahb";
 			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
@@ -423,8 +441,14 @@
 		mmc3: mmc@01c12000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c12000 0x1000>;
-			clocks = <&ahb1_gates 11>, <&mmc3_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb1_gates 11>,
+				 <&mmc3_clk 0>,
+				 <&mmc3_clk 1>,
+				 <&mmc3_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			resets = <&ahb1_rst 11>;
 			reset-names = "ahb";
 			interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 786d491..3a8530b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -337,35 +337,43 @@
 		};
 
 		mmc0_clk: clk@01c20088 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20088 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc0";
+			clock-output-names = "mmc0",
+					     "mmc0_output",
+					     "mmc0_sample";
 		};
 
 		mmc1_clk: clk@01c2008c {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c2008c 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc1";
+			clock-output-names = "mmc1",
+					     "mmc1_output",
+					     "mmc1_sample";
 		};
 
 		mmc2_clk: clk@01c20090 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20090 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc2";
+			clock-output-names = "mmc2",
+					     "mmc2_output",
+					     "mmc2_sample";
 		};
 
 		mmc3_clk: clk@01c20094 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20094 0x4>;
 			clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-			clock-output-names = "mmc3";
+			clock-output-names = "mmc3",
+					     "mmc3_output",
+					     "mmc3_sample";
 		};
 
 		ts_clk: clk@01c20098 {
@@ -583,8 +591,14 @@
 		mmc0: mmc@01c0f000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c0f000 0x1000>;
-			clocks = <&ahb_gates 8>, <&mmc0_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 8>,
+				 <&mmc0_clk 0>,
+				 <&mmc0_clk 1>,
+				 <&mmc0_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
@@ -592,8 +606,14 @@
 		mmc1: mmc@01c10000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c10000 0x1000>;
-			clocks = <&ahb_gates 9>, <&mmc1_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 9>,
+				 <&mmc1_clk 0>,
+				 <&mmc1_clk 1>,
+				 <&mmc1_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
@@ -601,8 +621,14 @@
 		mmc2: mmc@01c11000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c11000 0x1000>;
-			clocks = <&ahb_gates 10>, <&mmc2_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 10>,
+				 <&mmc2_clk 0>,
+				 <&mmc2_clk 1>,
+				 <&mmc2_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
@@ -610,8 +636,14 @@
 		mmc3: mmc@01c12000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c12000 0x1000>;
-			clocks = <&ahb_gates 11>, <&mmc3_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb_gates 11>,
+				 <&mmc3_clk 0>,
+				 <&mmc3_clk 1>,
+				 <&mmc3_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index dd34527..382ebd1 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -119,11 +119,19 @@
 		};
 
 		/* dummy clock until actually implemented */
-		pll6: pll6_clk {
+		pll5: pll5_clk {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
-			clock-frequency = <600000000>;
-			clock-output-names = "pll6";
+			clock-frequency = <0>;
+			clock-output-names = "pll5";
+		};
+
+		pll6: clk@01c20028 {
+			#clock-cells = <1>;
+			compatible = "allwinner,sun6i-a31-pll6-clk";
+			reg = <0x01c20028 0x4>;
+			clocks = <&osc24M>;
+			clock-output-names = "pll6", "pll6x2";
 		};
 
 		cpu: cpu_clk@01c20050 {
@@ -149,19 +157,11 @@
 			clock-output-names = "axi";
 		};
 
-		ahb1_mux: ahb1_mux_clk@01c20054 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
-			reg = <0x01c20054 0x4>;
-			clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
-			clock-output-names = "ahb1_mux";
-		};
-
 		ahb1: ahb1_clk@01c20054 {
 			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-ahb-clk";
+			compatible = "allwinner,sun6i-a31-ahb1-clk";
 			reg = <0x01c20054 0x4>;
-			clocks = <&ahb1_mux>;
+			clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
 			clock-output-names = "ahb1";
 		};
 
@@ -202,7 +202,7 @@
 			#clock-cells = <0>;
 			compatible = "allwinner,sun4i-a10-apb1-clk";
 			reg = <0x01c20058 0x4>;
-			clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
+			clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
 			clock-output-names = "apb2";
 		};
 
@@ -218,27 +218,41 @@
 		};
 
 		mmc0_clk: clk@01c20088 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20088 0x4>;
-			clocks = <&osc24M>, <&pll6>;
-			clock-output-names = "mmc0";
+			clocks = <&osc24M>, <&pll6 0>;
+			clock-output-names = "mmc0",
+					     "mmc0_output",
+					     "mmc0_sample";
 		};
 
 		mmc1_clk: clk@01c2008c {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c2008c 0x4>;
-			clocks = <&osc24M>, <&pll6>;
-			clock-output-names = "mmc1";
+			clocks = <&osc24M>, <&pll6 0>;
+			clock-output-names = "mmc1",
+					     "mmc1_output",
+					     "mmc1_sample";
 		};
 
 		mmc2_clk: clk@01c20090 {
-			#clock-cells = <0>;
-			compatible = "allwinner,sun4i-a10-mod0-clk";
+			#clock-cells = <1>;
+			compatible = "allwinner,sun4i-a10-mmc-clk";
 			reg = <0x01c20090 0x4>;
-			clocks = <&osc24M>, <&pll6>;
-			clock-output-names = "mmc2";
+			clocks = <&osc24M>, <&pll6 0>;
+			clock-output-names = "mmc2",
+					     "mmc2_output",
+					     "mmc2_sample";
+		};
+
+		mbus_clk: clk@01c2015c {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun8i-a23-mbus-clk";
+			reg = <0x01c2015c 0x4>;
+			clocks = <&osc24M>, <&pll6 1>, <&pll5>;
+			clock-output-names = "mbus";
 		};
 	};
 
@@ -260,8 +274,14 @@
 		mmc0: mmc@01c0f000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c0f000 0x1000>;
-			clocks = <&ahb1_gates 8>, <&mmc0_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb1_gates 8>,
+				 <&mmc0_clk 0>,
+				 <&mmc0_clk 1>,
+				 <&mmc0_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			resets = <&ahb1_rst 8>;
 			reset-names = "ahb";
 			interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
@@ -271,8 +291,14 @@
 		mmc1: mmc@01c10000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c10000 0x1000>;
-			clocks = <&ahb1_gates 9>, <&mmc1_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb1_gates 9>,
+				 <&mmc1_clk 0>,
+				 <&mmc1_clk 1>,
+				 <&mmc1_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			resets = <&ahb1_rst 9>;
 			reset-names = "ahb";
 			interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
@@ -282,8 +308,14 @@
 		mmc2: mmc@01c11000 {
 			compatible = "allwinner,sun5i-a13-mmc";
 			reg = <0x01c11000 0x1000>;
-			clocks = <&ahb1_gates 10>, <&mmc2_clk>;
-			clock-names = "ahb", "mmc";
+			clocks = <&ahb1_gates 10>,
+				 <&mmc2_clk 0>,
+				 <&mmc2_clk 1>,
+				 <&mmc2_clk 2>;
+			clock-names = "ahb",
+				      "mmc",
+				      "output",
+				      "sample";
 			resets = <&ahb1_rst 10>;
 			reset-names = "ahb";
 			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index f2670f6..811e72b 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -70,6 +70,7 @@
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
+CONFIG_ARM_AT91_ETHER=y
 CONFIG_MACB=y
 # CONFIG_NET_VENDOR_BROADCOM is not set
 CONFIG_DM9000=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index e8a4c95..06075b6 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -62,6 +62,17 @@
 CONFIG_ARCH_STI=y
 CONFIG_ARCH_EXYNOS=y
 CONFIG_EXYNOS5420_MCPM=y
+CONFIG_ARCH_SHMOBILE_MULTI=y
+CONFIG_ARCH_EMEV2=y
+CONFIG_ARCH_R7S72100=y
+CONFIG_ARCH_R8A73A4=y
+CONFIG_ARCH_R8A7740=y
+CONFIG_ARCH_R8A7779=y
+CONFIG_ARCH_R8A7790=y
+CONFIG_ARCH_R8A7791=y
+CONFIG_ARCH_R8A7794=y
+CONFIG_ARCH_SH73A0=y
+CONFIG_MACH_MARZEN=y
 CONFIG_ARCH_SUNXI=y
 CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_TEGRA=y
@@ -84,9 +95,11 @@
 CONFIG_PCI_MSI=y
 CONFIG_PCI_MVEBU=y
 CONFIG_PCI_TEGRA=y
+CONFIG_PCI_RCAR_GEN2=y
+CONFIG_PCI_RCAR_GEN2_PCIE=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_SMP=y
-CONFIG_NR_CPUS=8
+CONFIG_NR_CPUS=16
 CONFIG_HIGHPTE=y
 CONFIG_CMA=y
 CONFIG_ARM_APPENDED_DTB=y
@@ -130,6 +143,7 @@
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
 CONFIG_OMAP_OCP2SCP=y
+CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -157,6 +171,7 @@
 CONFIG_AHCI_TEGRA=y
 CONFIG_SATA_HIGHBANK=y
 CONFIG_SATA_MV=y
+CONFIG_SATA_RCAR=y
 CONFIG_NETDEVICES=y
 CONFIG_HIX5HD2_GMAC=y
 CONFIG_SUN4I_EMAC=y
@@ -167,14 +182,17 @@
 CONFIG_MVNETA=y
 CONFIG_KS8851=y
 CONFIG_R8169=y
+CONFIG_SH_ETH=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=y
 CONFIG_TI_CPSW=y
 CONFIG_XILINX_EMACLITE=y
 CONFIG_AT803X_PHY=y
 CONFIG_MARVELL_PHY=y
+CONFIG_SMSC_PHY=y
 CONFIG_BROADCOM_PHY=y
 CONFIG_ICPLUS_PHY=y
+CONFIG_MICREL_PHY=y
 CONFIG_USB_PEGASUS=y
 CONFIG_USB_USBNET=y
 CONFIG_USB_NET_SMSC75XX=y
@@ -192,15 +210,18 @@
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_TOUCHSCREEN_ST1232=m
 CONFIG_TOUCHSCREEN_STMPE=y
 CONFIG_TOUCHSCREEN_SUN4I=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MPU3050=y
 CONFIG_INPUT_AXP20X_PEK=y
+CONFIG_INPUT_ADXL34X=m
 CONFIG_SERIO_AMBAKMI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_EM=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
@@ -213,6 +234,9 @@
 CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_IMX=y
 CONFIG_SERIAL_IMX_CONSOLE=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=20
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_VT8500=y
@@ -233,19 +257,26 @@
 CONFIG_I2C_MUX_PINCTRL=y
 CONFIG_I2C_CADENCE=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_GPIO=m
 CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_RIIC=y
 CONFIG_I2C_S3C2410=y
+CONFIG_I2C_SH_MOBILE=y
 CONFIG_I2C_SIRF=y
-CONFIG_I2C_TEGRA=y
 CONFIG_I2C_ST=y
-CONFIG_SPI=y
+CONFIG_I2C_TEGRA=y
 CONFIG_I2C_XILINX=y
-CONFIG_SPI_DAVINCI=y
+CONFIG_I2C_RCAR=y
+CONFIG_SPI=y
 CONFIG_SPI_CADENCE=y
+CONFIG_SPI_DAVINCI=y
 CONFIG_SPI_OMAP24XX=y
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
+CONFIG_SPI_RSPI=y
+CONFIG_SPI_SH_MSIOF=m
+CONFIG_SPI_SH_HSPI=y
 CONFIG_SPI_SIRF=y
 CONFIG_SPI_SUN4I=y
 CONFIG_SPI_SUN6I=y
@@ -259,12 +290,15 @@
 CONFIG_PINCTRL_APQ8084=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_GENERIC_PLATFORM=y
-CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_DAVINCI=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_EM=y
+CONFIG_GPIO_RCAR=y
 CONFIG_GPIO_XILINX=y
 CONFIG_GPIO_ZYNQ=y
 CONFIG_GPIO_PCA953X=y
 CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PCF857X=y
 CONFIG_GPIO_TWL4030=y
 CONFIG_GPIO_PALMAS=y
 CONFIG_GPIO_SYSCON=y
@@ -276,10 +310,12 @@
 CONFIG_POWER_RESET_GPIO=y
 CONFIG_POWER_RESET_KEYSTONE=y
 CONFIG_POWER_RESET_SUN6I=y
+CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
 CONFIG_THERMAL=y
 CONFIG_CPU_THERMAL=y
+CONFIG_RCAR_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_DAVINCI_WATCHDOG=m
 CONFIG_ST_THERMAL_SYSCFG=y
@@ -290,6 +326,7 @@
 CONFIG_ORION_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
 CONFIG_MESON_WATCHDOG=y
+CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
 CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_AXP20X=y
@@ -304,13 +341,16 @@
 CONFIG_MFD_TPS6586X=y
 CONFIG_MFD_TPS65910=y
 CONFIG_REGULATOR_AB8500=y
+CONFIG_REGULATOR_AS3711=y
 CONFIG_REGULATOR_AS3722=y
 CONFIG_REGULATOR_AXP20X=y
 CONFIG_REGULATOR_BCM590XX=y
+CONFIG_REGULATOR_DA9210=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_MFD_SYSCON=y
 CONFIG_POWER_RESET_SYSCON=y
 CONFIG_REGULATOR_MAX8907=y
+CONFIG_REGULATOR_MAX8973=y
 CONFIG_REGULATOR_MAX77686=y
 CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_S2MPS11=y
@@ -324,18 +364,32 @@
 CONFIG_REGULATOR_VEXPRESS=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_MEDIA_USB_SUPPORT=y
 CONFIG_USB_VIDEO_CLASS=y
 CONFIG_USB_GSPCA=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=m
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_RENESAS_VSP1=m
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_VIDEO_ADV7180=m
 CONFIG_DRM=y
+CONFIG_DRM_RCAR_DU=m
 CONFIG_DRM_TEGRA=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_FB_WM8505=y
+CONFIG_FB_SH_MOBILE_LCDC=y
 CONFIG_FB_SIMPLE=y
+CONFIG_FB_SH_MOBILE_MERAM=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_AS3711=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_SOUND=y
@@ -343,6 +397,8 @@
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_SOC=y
+CONFIG_SND_SOC_SH4_FSI=m
+CONFIG_SND_SOC_RCAR=m
 CONFIG_SND_SOC_TEGRA=y
 CONFIG_SND_SOC_TEGRA_RT5640=y
 CONFIG_SND_SOC_TEGRA_WM8753=y
@@ -350,6 +406,8 @@
 CONFIG_SND_SOC_TEGRA_TRIMSLICE=y
 CONFIG_SND_SOC_TEGRA_ALC5632=y
 CONFIG_SND_SOC_TEGRA_MAX98090=y
+CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_WM8978=m
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
@@ -362,6 +420,8 @@
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_RENESAS_USBHS=m
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC3=y
 CONFIG_USB_CHIPIDEA=y
@@ -374,6 +434,10 @@
 CONFIG_USB_GPIO_VBUS=y
 CONFIG_USB_ISP1301=y
 CONFIG_USB_MXS_PHY=y
+CONFIG_USB_RCAR_PHY=m
+CONFIG_USB_RCAR_GEN2_PHY=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_ARMMMCI=y
@@ -392,12 +456,14 @@
 CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
 CONFIG_MMC_MVSDIO=y
-CONFIG_MMC_SUNXI=y
+CONFIG_MMC_SDHI=y
 CONFIG_MMC_DW=y
 CONFIG_MMC_DW_IDMAC=y
 CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SH_MMCIF=y
+CONFIG_MMC_SUNXI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
@@ -421,10 +487,12 @@
 CONFIG_RTC_DRV_DS1307=y
 CONFIG_RTC_DRV_MAX8907=y
 CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_PALMAS=y
 CONFIG_RTC_DRV_TWL4030=y
 CONFIG_RTC_DRV_TPS6586X=y
 CONFIG_RTC_DRV_TPS65910=y
+CONFIG_RTC_DRV_S35390A=m
 CONFIG_RTC_DRV_EM3027=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_VT8500=y
@@ -436,6 +504,9 @@
 CONFIG_DW_DMAC=y
 CONFIG_MV_XOR=y
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_SH_DMAE=y
+CONFIG_RCAR_AUDMAC_PP=m
+CONFIG_RCAR_DMAC=y
 CONFIG_STE_DMA40=y
 CONFIG_SIRF_DMA=y
 CONFIG_TI_EDMA=y
@@ -468,6 +539,7 @@
 CONFIG_XILINX_XADC=y
 CONFIG_AK8975=y
 CONFIG_PWM=y
+CONFIG_PWM_RENESAS_TPU=y
 CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
 CONFIG_PHY_HIX5HD2_SATA=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b738652..8e10859 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -114,6 +114,7 @@
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_ECC_BCH=y
 CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_OMAP_BCH=y
 CONFIG_MTD_ONENAND=y
 CONFIG_MTD_ONENAND_VERIFY_WRITE=y
 CONFIG_MTD_ONENAND_OMAP2=y
@@ -248,6 +249,7 @@
 CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_PBIAS=y
 CONFIG_REGULATOR_TI_ABB=y
+CONFIG_REGULATOR_TPS62360=m
 CONFIG_REGULATOR_TPS65023=y
 CONFIG_REGULATOR_TPS6507X=y
 CONFIG_REGULATOR_TPS65217=y
@@ -374,7 +376,8 @@
 CONFIG_PWM_TWL=m
 CONFIG_PWM_TWL_LED=m
 CONFIG_OMAP_USB2=m
-CONFIG_TI_PIPE3=m
+CONFIG_TI_PIPE3=y
+CONFIG_TWL4030_USB=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index 41d856e..510c747 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -3,8 +3,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EMBEDDED=y
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 38840a8..8f6a570 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -4,6 +4,7 @@
 CONFIG_PERF_EVENTS=y
 CONFIG_ARCH_SUNXI=y
 CONFIG_SMP=y
+CONFIG_NR_CPUS=8
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
 CONFIG_HIGHPTE=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index f489fda..37fe607 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -118,8 +118,8 @@
 CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_MON=y
-CONFIG_USB_ISP1760_HCD=y
 CONFIG_USB_STORAGE=y
+CONFIG_USB_ISP1760=y
 CONFIG_MMC=y
 CONFIG_MMC_ARMMMCI=y
 CONFIG_NEW_LEDS=y
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
index 71e5fc7..1d1800f 100644
--- a/arch/arm/crypto/aesbs-core.S_shipped
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -58,14 +58,18 @@
 # define VFP_ABI_FRAME	0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__	7
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
 #endif
 
 #ifdef __thumb__
 # define adrl adr
 #endif
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
 .text
 .syntax	unified 	@ ARMv7-capable assembler is expected to handle this
 #ifdef __thumb2__
@@ -74,8 +78,6 @@
 .code   32
 #endif
 
-.fpu	neon
-
 .type	_bsaes_decrypt8,%function
 .align	4
 _bsaes_decrypt8:
@@ -2095,9 +2097,11 @@
 	vld1.8	{q8}, [r0]			@ initial tweak
 	adr	r2, .Lxts_magic
 
+#ifndef	XTS_CHAIN_TWEAK
 	tst	r9, #0xf			@ if not multiple of 16
 	it	ne				@ Thumb2 thing, sanity check in ARM
 	subne	r9, #0x10			@ subtract another 16 bytes
+#endif
 	subs	r9, #0x80
 
 	blo	.Lxts_dec_short
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
index be068db..a4d3856 100644
--- a/arch/arm/crypto/bsaes-armv7.pl
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -701,14 +701,18 @@
 # define VFP_ABI_FRAME	0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__	7
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
 #endif
 
 #ifdef __thumb__
 # define adrl adr
 #endif
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
 .text
 .syntax	unified 	@ ARMv7-capable assembler is expected to handle this
 #ifdef __thumb2__
@@ -717,8 +721,6 @@
 .code   32
 #endif
 
-.fpu	neon
-
 .type	_bsaes_decrypt8,%function
 .align	4
 _bsaes_decrypt8:
@@ -2076,9 +2078,11 @@
 	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
 	adr	$magic, .Lxts_magic
 
+#ifndef	XTS_CHAIN_TWEAK
 	tst	$len, #0xf			@ if not multiple of 16
 	it	ne				@ Thumb2 thing, sanity check in ARM
 	subne	$len, #0x10			@ subtract another 16 bytes
+#endif
 	subs	$len, #0x80
 
 	blo	.Lxts_dec_short
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 37ca2a4..4cf48c3 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -149,31 +149,30 @@
 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
 })
 
+#define kvm_pgd_index(addr)			pgd_index(addr)
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
 	return page_count(ptr_page) == 1;
 }
 
-
 #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
 #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
 #define kvm_pud_table_empty(kvm, pudp) (0)
 
 #define KVM_PREALLOC_LEVEL	0
 
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
-{
-	return 0;
-}
-
-static inline void kvm_free_hwpgd(struct kvm *kvm) { }
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
 	return kvm->arch.pgd;
 }
 
+static inline unsigned int kvm_get_hwpgd_size(void)
+{
+	return PTRS_PER_S2_PGD * sizeof(pgd_t);
+}
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
@@ -207,7 +206,7 @@
 
 	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
 
-	VM_BUG_ON(size & PAGE_MASK);
+	VM_BUG_ON(size & ~PAGE_MASK);
 
 	if (!need_flush && !icache_is_pipt())
 		goto vipt_cache;
diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S
index 80a6501..c3c45e6 100644
--- a/arch/arm/include/debug/at91.S
+++ b/arch/arm/include/debug/at91.S
@@ -18,8 +18,11 @@
 #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */
 #endif
 
-/* Keep in sync with mach-at91/include/mach/hardware.h */
+#ifdef CONFIG_MMU
 #define AT91_IO_P2V(x) ((x) - 0x01000000)
+#else
+#define AT91_IO_P2V(x) (x)
+#endif
 
 #define AT91_DBGU_SR		(0x14)	/* Status Register */
 #define AT91_DBGU_THR		(0x1c)	/* Transmitter Holding Register */
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index dd9acc9..61b53c4 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -231,7 +231,7 @@
 /*
  * PMU platform driver and devicetree bindings.
  */
-static struct of_device_id cpu_pmu_of_device_ids[] = {
+static const struct of_device_id cpu_pmu_of_device_ids[] = {
 	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
 	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
 	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e55408e..1d60beb 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -246,12 +246,9 @@
 		if (cpu_arch)
 			cpu_arch += CPU_ARCH_ARMv3;
 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
-		unsigned int mmfr0;
-
 		/* Revised CPUID format. Read the Memory Model Feature
 		 * Register 0 and check for VMSAv7 or PMSAv7 */
-		asm("mrc	p15, 0, %0, c0, c1, 4"
-		    : "=r" (mmfr0));
+		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
 		    (mmfr0 & 0x000000f0) >= 0x00000030)
 			cpu_arch = CPU_ARCH_ARMv7;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 07e7eb1..5560f74 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -540,7 +540,7 @@
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		kvm_guest_exit();
-		trace_kvm_exit(*vcpu_pc(vcpu));
+		trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 		/*
 		 * We may have taken a host interrupt in HYP mode (ie
 		 * while executing the guest). This interrupt is still
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3e6859b..5656d79 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -290,7 +290,7 @@
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
-	pgd = pgdp + pgd_index(addr);
+	pgd = pgdp + kvm_pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		if (!pgd_none(*pgd))
@@ -355,7 +355,7 @@
 	phys_addr_t next;
 	pgd_t *pgd;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		stage2_flush_puds(kvm, pgd, addr, next);
@@ -632,6 +632,20 @@
 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
+/* Free the HW pgd, one page at a time */
+static void kvm_free_hwpgd(void *hwpgd)
+{
+	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
+}
+
+/* Allocate the HW PGD, making sure that each page gets its own refcount */
+static void *kvm_alloc_hwpgd(void)
+{
+	unsigned int size = kvm_get_hwpgd_size();
+
+	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+}
+
 /**
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:	The KVM struct pointer for the VM.
@@ -645,15 +659,31 @@
  */
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
-	int ret;
 	pgd_t *pgd;
+	void *hwpgd;
 
 	if (kvm->arch.pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
 
+	hwpgd = kvm_alloc_hwpgd();
+	if (!hwpgd)
+		return -ENOMEM;
+
+	/* When the kernel uses more levels of page tables than the
+	 * guest, we allocate a fake PGD and pre-populate it to point
+	 * to the next-level page table, which will be the real
+	 * initial page table pointed to by the VTTBR.
+	 *
+	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+	 * the PMD and the kernel will use folded pud.
+	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+	 * pages.
+	 */
 	if (KVM_PREALLOC_LEVEL > 0) {
+		int i;
+
 		/*
 		 * Allocate fake pgd for the page table manipulation macros to
 		 * work.  This is not used by the hardware and we have no
@@ -661,30 +691,32 @@
 		 */
 		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
 				       GFP_KERNEL | __GFP_ZERO);
+
+		if (!pgd) {
+			kvm_free_hwpgd(hwpgd);
+			return -ENOMEM;
+		}
+
+		/* Plug the HW PGD into the fake one. */
+		for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+			if (KVM_PREALLOC_LEVEL == 1)
+				pgd_populate(NULL, pgd + i,
+					     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+			else if (KVM_PREALLOC_LEVEL == 2)
+				pud_populate(NULL, pud_offset(pgd, 0) + i,
+					     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+		}
 	} else {
 		/*
 		 * Allocate actual first-level Stage-2 page table used by the
 		 * hardware for Stage-2 page table walks.
 		 */
-		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
+		pgd = (pgd_t *)hwpgd;
 	}
 
-	if (!pgd)
-		return -ENOMEM;
-
-	ret = kvm_prealloc_hwpgd(kvm, pgd);
-	if (ret)
-		goto out_err;
-
 	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
-out_err:
-	if (KVM_PREALLOC_LEVEL > 0)
-		kfree(pgd);
-	else
-		free_pages((unsigned long)pgd, S2_PGD_ORDER);
-	return ret;
 }
 
 /**
@@ -785,11 +817,10 @@
 		return;
 
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-	kvm_free_hwpgd(kvm);
+	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
 	if (KVM_PREALLOC_LEVEL > 0)
 		kfree(kvm->arch.pgd);
-	else
-		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+
 	kvm->arch.pgd = NULL;
 }
 
@@ -799,7 +830,7 @@
 	pgd_t *pgd;
 	pud_t *pud;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	if (WARN_ON(pgd_none(*pgd))) {
 		if (!cache)
 			return NULL;
@@ -1089,7 +1120,7 @@
 	pgd_t *pgd;
 	phys_addr_t next;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	do {
 		/*
 		 * Release kvm_mmu_lock periodically if the memory region is
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index 881874b..6817664 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -25,18 +25,22 @@
 );
 
 TRACE_EVENT(kvm_exit,
-	TP_PROTO(unsigned long vcpu_pc),
-	TP_ARGS(vcpu_pc),
+	TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc),
+	TP_ARGS(exit_reason, vcpu_pc),
 
 	TP_STRUCT__entry(
+		__field(	unsigned int,	exit_reason	)
 		__field(	unsigned long,	vcpu_pc		)
 	),
 
 	TP_fast_assign(
+		__entry->exit_reason		= exit_reason;
 		__entry->vcpu_pc		= vcpu_pc;
 	),
 
-	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+	TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx",
+		  __entry->exit_reason,
+		  __entry->vcpu_pc)
 );
 
 TRACE_EVENT(kvm_guest_fault,
diff --git a/arch/arm/mach-asm9260/Kconfig b/arch/arm/mach-asm9260/Kconfig
index 8423be7..5224120 100644
--- a/arch/arm/mach-asm9260/Kconfig
+++ b/arch/arm/mach-asm9260/Kconfig
@@ -2,5 +2,7 @@
 	bool "Alphascale ASM9260"
 	depends on ARCH_MULTI_V5
 	select CPU_ARM926T
+	select ASM9260_TIMER
+	select GENERIC_CLOCKEVENTS
 	help
 	  Support for Alphascale ASM9260 based platform.
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index c6740e3..c74a443 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -64,7 +64,6 @@
 	select SOC_SAMA5
 	select CLKSRC_MMIO
 	select CACHE_L2X0
-	select CACHE_PL310
 	select HAVE_FB_ATMEL
 	select HAVE_AT91_UTMI
 	select HAVE_AT91_SMD
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 51761f8..b00d095 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -183,7 +183,7 @@
 void __iomem *at91_st_base;
 EXPORT_SYMBOL_GPL(at91_st_base);
 
-static struct of_device_id at91rm9200_st_timer_ids[] = {
+static const struct of_device_id at91rm9200_st_timer_ids[] = {
 	{ .compatible = "atmel,at91rm9200-st" },
 	{ /* sentinel */ }
 };
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index a6e726a..583369f 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -35,10 +35,10 @@
 extern void __init at91sam9g45_pm_init(void);
 extern void __init at91sam9x5_pm_init(void);
 #else
-void __init at91rm9200_pm_init(void) { }
-void __init at91sam9260_pm_init(void) { }
-void __init at91sam9g45_pm_init(void) { }
-void __init at91sam9x5_pm_init(void) { }
+static inline void __init at91rm9200_pm_init(void) { }
+static inline void __init at91sam9260_pm_init(void) { }
+static inline void __init at91sam9g45_pm_init(void) { }
+static inline void __init at91sam9x5_pm_init(void) { }
 #endif
 
 #endif /* _AT91_GENERIC_H */
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index af8d8af..aa4116e 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -226,7 +226,7 @@
 	}
 }
 
-static struct of_device_id ramc_ids[] = {
+static const struct of_device_id ramc_ids[] __initconst = {
 	{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
 	{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
@@ -234,7 +234,7 @@
 	{ /*sentinel*/ }
 };
 
-static void at91_dt_ramc(void)
+static __init void at91_dt_ramc(void)
 {
 	struct device_node *np;
 	const struct of_device_id *of_id;
@@ -270,37 +270,35 @@
 	phys_addr_t sram_pbase;
 	unsigned long sram_base;
 	struct device_node *node;
-	struct platform_device *pdev;
+	struct platform_device *pdev = NULL;
 
-	node = of_find_compatible_node(NULL, NULL, "mmio-sram");
-	if (!node) {
-		pr_warn("%s: failed to find sram node!\n", __func__);
-		return;
+	for_each_compatible_node(node, NULL, "mmio-sram") {
+		pdev = of_find_device_by_node(node);
+		if (pdev) {
+			of_node_put(node);
+			break;
+		}
 	}
 
-	pdev = of_find_device_by_node(node);
 	if (!pdev) {
 		pr_warn("%s: failed to find sram device!\n", __func__);
-		goto put_node;
+		return;
 	}
 
 	sram_pool = dev_get_gen_pool(&pdev->dev);
 	if (!sram_pool) {
 		pr_warn("%s: sram pool unavailable!\n", __func__);
-		goto put_node;
+		return;
 	}
 
 	sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz);
 	if (!sram_base) {
 		pr_warn("%s: unable to alloc ocram!\n", __func__);
-		goto put_node;
+		return;
 	}
 
 	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
 	slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false);
-
-put_node:
-	of_node_put(node);
 }
 #endif
 
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index d2c8996..86c0aa8 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -44,7 +44,7 @@
 		"    mcr    p15, 0, %0, c7, c0, 4\n\t"
 		"    str    %5, [%1, %2]"
 		:
-		: "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR),
+		: "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
 		  "r" (1), "r" (AT91RM9200_SDRAMC_SRR),
 		  "r" (lpr));
 }
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index 556151e8..931f0e3 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -25,11 +25,6 @@
  */
 #undef SLOWDOWN_MASTER_CLOCK
 
-#define MCKRDY_TIMEOUT		1000
-#define MOSCRDY_TIMEOUT 	1000
-#define PLLALOCK_TIMEOUT	1000
-#define PLLBLOCK_TIMEOUT	1000
-
 pmc	.req	r0
 sdramc	.req	r1
 ramc1	.req	r2
@@ -41,60 +36,42 @@
  * Wait until master clock is ready (after switching master clock source)
  */
 	.macro wait_mckrdy
-	mov	tmp2, #MCKRDY_TIMEOUT
-1:	sub	tmp2, tmp2, #1
-	cmp	tmp2, #0
-	beq	2f
-	ldr	tmp1, [pmc, #AT91_PMC_SR]
+1:	ldr	tmp1, [pmc, #AT91_PMC_SR]
 	tst	tmp1, #AT91_PMC_MCKRDY
 	beq	1b
-2:
 	.endm
 
 /*
  * Wait until master oscillator has stabilized.
  */
 	.macro wait_moscrdy
-	mov	tmp2, #MOSCRDY_TIMEOUT
-1:	sub	tmp2, tmp2, #1
-	cmp	tmp2, #0
-	beq	2f
-	ldr	tmp1, [pmc, #AT91_PMC_SR]
+1:	ldr	tmp1, [pmc, #AT91_PMC_SR]
 	tst	tmp1, #AT91_PMC_MOSCS
 	beq	1b
-2:
 	.endm
 
 /*
  * Wait until PLLA has locked.
  */
 	.macro wait_pllalock
-	mov	tmp2, #PLLALOCK_TIMEOUT
-1:	sub	tmp2, tmp2, #1
-	cmp	tmp2, #0
-	beq	2f
-	ldr	tmp1, [pmc, #AT91_PMC_SR]
+1:	ldr	tmp1, [pmc, #AT91_PMC_SR]
 	tst	tmp1, #AT91_PMC_LOCKA
 	beq	1b
-2:
 	.endm
 
 /*
  * Wait until PLLB has locked.
  */
 	.macro wait_pllblock
-	mov	tmp2, #PLLBLOCK_TIMEOUT
-1:	sub	tmp2, tmp2, #1
-	cmp	tmp2, #0
-	beq	2f
-	ldr	tmp1, [pmc, #AT91_PMC_SR]
+1:	ldr	tmp1, [pmc, #AT91_PMC_SR]
 	tst	tmp1, #AT91_PMC_LOCKB
 	beq	1b
-2:
 	.endm
 
 	.text
 
+	.arm
+
 /* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc,
  *			void __iomem *ramc1, int memctrl)
  */
@@ -134,6 +111,16 @@
 	cmp	memctrl, #AT91_MEMCTRL_DDRSDR
 	bne	sdr_sr_enable
 
+	/* LPDDR1 --> force DDR2 mode during self-refresh */
+	ldr	tmp1, [sdramc, #AT91_DDRSDRC_MDR]
+	str	tmp1, .saved_sam9_mdr
+	bic	tmp1, tmp1, #~AT91_DDRSDRC_MD
+	cmp	tmp1, #AT91_DDRSDRC_MD_LOW_POWER_DDR
+	ldreq	tmp1, [sdramc, #AT91_DDRSDRC_MDR]
+	biceq	tmp1, tmp1, #AT91_DDRSDRC_MD
+	orreq	tmp1, tmp1, #AT91_DDRSDRC_MD_DDR2
+	streq	tmp1, [sdramc, #AT91_DDRSDRC_MDR]
+
 	/* prepare for DDRAM self-refresh mode */
 	ldr	tmp1, [sdramc, #AT91_DDRSDRC_LPR]
 	str	tmp1, .saved_sam9_lpr
@@ -142,14 +129,26 @@
 
 	/* figure out if we use the second ram controller */
 	cmp	ramc1, #0
-	ldrne	tmp2, [ramc1, #AT91_DDRSDRC_LPR]
-	strne	tmp2, .saved_sam9_lpr1
-	bicne	tmp2, #AT91_DDRSDRC_LPCB
-	orrne	tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH
+	beq	ddr_no_2nd_ctrl
+
+	ldr	tmp2, [ramc1, #AT91_DDRSDRC_MDR]
+	str	tmp2, .saved_sam9_mdr1
+	bic	tmp2, tmp2, #~AT91_DDRSDRC_MD
+	cmp	tmp2, #AT91_DDRSDRC_MD_LOW_POWER_DDR
+	ldreq	tmp2, [ramc1, #AT91_DDRSDRC_MDR]
+	biceq	tmp2, tmp2, #AT91_DDRSDRC_MD
+	orreq	tmp2, tmp2, #AT91_DDRSDRC_MD_DDR2
+	streq	tmp2, [ramc1, #AT91_DDRSDRC_MDR]
+
+	ldr	tmp2, [ramc1, #AT91_DDRSDRC_LPR]
+	str	tmp2, .saved_sam9_lpr1
+	bic	tmp2, #AT91_DDRSDRC_LPCB
+	orr	tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH
 
 	/* Enable DDRAM self-refresh mode */
+	str	tmp2, [ramc1, #AT91_DDRSDRC_LPR]
+ddr_no_2nd_ctrl:
 	str	tmp1, [sdramc, #AT91_DDRSDRC_LPR]
-	strne	tmp2, [ramc1, #AT91_DDRSDRC_LPR]
 
 	b	sdr_sr_done
 
@@ -208,6 +207,7 @@
 	/* Turn off the main oscillator */
 	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
 	bic	tmp1, tmp1, #AT91_PMC_MOSCEN
+	orr	tmp1, tmp1, #AT91_PMC_KEY
 	str	tmp1, [pmc, #AT91_CKGR_MOR]
 
 	/* Wait for interrupt */
@@ -216,6 +216,7 @@
 	/* Turn on the main oscillator */
 	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
 	orr	tmp1, tmp1, #AT91_PMC_MOSCEN
+	orr	tmp1, tmp1, #AT91_PMC_KEY
 	str	tmp1, [pmc, #AT91_CKGR_MOR]
 
 	wait_moscrdy
@@ -280,12 +281,17 @@
 	 */
 	cmp	memctrl, #AT91_MEMCTRL_DDRSDR
 	bne	sdr_en_restore
+	/* Restore MDR in case of LPDDR1 */
+	ldr	tmp1, .saved_sam9_mdr
+	str	tmp1, [sdramc, #AT91_DDRSDRC_MDR]
 	/* Restore LPR on AT91 with DDRAM */
 	ldr	tmp1, .saved_sam9_lpr
 	str	tmp1, [sdramc, #AT91_DDRSDRC_LPR]
 
 	/* if we use the second ram controller */
 	cmp	ramc1, #0
+	ldrne	tmp2, .saved_sam9_mdr1
+	strne	tmp2, [ramc1, #AT91_DDRSDRC_MDR]
 	ldrne	tmp2, .saved_sam9_lpr1
 	strne	tmp2, [ramc1, #AT91_DDRSDRC_LPR]
 
@@ -319,5 +325,11 @@
 .saved_sam9_lpr1:
 	.word 0
 
+.saved_sam9_mdr:
+	.word 0
+
+.saved_sam9_mdr1:
+	.word 0
+
 ENTRY(at91_slow_clock_sz)
 	.word .-at91_slow_clock
diff --git a/arch/arm/mach-axxia/axxia.c b/arch/arm/mach-axxia/axxia.c
index 19e5a1d..4db76a4 100644
--- a/arch/arm/mach-axxia/axxia.c
+++ b/arch/arm/mach-axxia/axxia.c
@@ -16,7 +16,7 @@
 #include <linux/init.h>
 #include <asm/mach/arch.h>
 
-static const char *axxia_dt_match[] __initconst = {
+static const char *const axxia_dt_match[] __initconst = {
 	"lsi,axm5516",
 	"lsi,axm5516-sim",
 	"lsi,axm5516-emu",
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index aaeec78..8b11f44 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -68,7 +68,7 @@
 	  This enables support for systems based on Broadcom mobile SoCs.
 
 config ARCH_BCM_281XX
-	bool "Broadcom BCM281XX SoC family"
+	bool "Broadcom BCM281XX SoC family" if ARCH_MULTI_V7
 	select ARCH_BCM_MOBILE
 	select HAVE_SMP
 	help
@@ -77,7 +77,7 @@
 	  variants.
 
 config ARCH_BCM_21664
-	bool "Broadcom BCM21664 SoC family"
+	bool "Broadcom BCM21664 SoC family" if ARCH_MULTI_V7
 	select ARCH_BCM_MOBILE
 	select HAVE_SMP
 	help
diff --git a/arch/arm/mach-bcm/brcmstb.c b/arch/arm/mach-bcm/brcmstb.c
index 60a5afa..3a60f7e 100644
--- a/arch/arm/mach-bcm/brcmstb.c
+++ b/arch/arm/mach-bcm/brcmstb.c
@@ -17,7 +17,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
-static const char *brcmstb_match[] __initconst = {
+static const char *const brcmstb_match[] __initconst = {
 	"brcm,bcm7445",
 	"brcm,brcmstb",
 	NULL
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 584e8d4..cd30f6f 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -32,12 +32,14 @@
 
 config ARCH_DAVINCI_DA830
 	bool "DA830/OMAP-L137/AM17x based system"
+	depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR
 	select ARCH_DAVINCI_DA8XX
 	select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1
 	select CP_INTC
 
 config ARCH_DAVINCI_DA850
 	bool "DA850/OMAP-L138/AM18x based system"
+	depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR
 	select ARCH_DAVINCI_DA8XX
 	select CP_INTC
 
diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c
index f703d82..438f685 100644
--- a/arch/arm/mach-davinci/da8xx-dt.c
+++ b/arch/arm/mach-davinci/da8xx-dt.c
@@ -20,7 +20,7 @@
 
 #define DA8XX_NUM_UARTS	3
 
-static struct of_device_id da8xx_irq_match[] __initdata = {
+static const struct of_device_id da8xx_irq_match[] __initconst = {
 	{ .compatible = "ti,cp-intc", .data = cp_intc_of_init, },
 	{ }
 };
diff --git a/arch/arm/mach-davinci/mux.c b/arch/arm/mach-davinci/mux.c
index a8eb909..6a2ff0a 100644
--- a/arch/arm/mach-davinci/mux.c
+++ b/arch/arm/mach-davinci/mux.c
@@ -30,7 +30,7 @@
 /*
  * Sets the DAVINCI MUX register based on the table
  */
-int __init_or_module davinci_cfg_reg(const unsigned long index)
+int davinci_cfg_reg(const unsigned long index)
 {
 	static DEFINE_SPINLOCK(mux_spin_lock);
 	struct davinci_soc_info *soc_info = &davinci_soc_info;
@@ -101,7 +101,7 @@
 }
 EXPORT_SYMBOL(davinci_cfg_reg);
 
-int __init_or_module davinci_cfg_reg_list(const short pins[])
+int davinci_cfg_reg_list(const short pins[])
 {
 	int i, error = -EINVAL;
 
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 2013f73..9e9dfdf 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -227,7 +227,7 @@
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static char const *exynos_dt_compat[] __initconst = {
+static char const *const exynos_dt_compat[] __initconst = {
 	"samsung,exynos3",
 	"samsung,exynos3250",
 	"samsung,exynos4",
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 3f32c47..d2e9f12 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -126,8 +126,7 @@
  */
 void exynos_cpu_power_down(int cpu)
 {
-	if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") ||
-		of_machine_is_compatible("samsung,exynos5800"))) {
+	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
 		/*
 		 * Bypass power down for CPU0 during suspend. Check for
 		 * the SYS_PWR_REG value to decide if we are suspending
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 20f2671..37266a8 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -161,6 +161,34 @@
 		of_genpd_add_provider_simple(np, &pd->pd);
 	}
 
+	/* Assign the child power domains to their parents */
+	for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
+		struct generic_pm_domain *child_domain, *parent_domain;
+		struct of_phandle_args args;
+
+		args.np = np;
+		args.args_count = 0;
+		child_domain = of_genpd_get_from_provider(&args);
+		if (!child_domain)
+			continue;
+
+		if (of_parse_phandle_with_args(np, "power-domains",
+					 "#power-domain-cells", 0, &args) != 0)
+			continue;
+
+		parent_domain = of_genpd_get_from_provider(&args);
+		if (!parent_domain)
+			continue;
+
+		if (pm_genpd_add_subdomain(parent_domain, child_domain))
+			pr_warn("%s failed to add subdomain: %s\n",
+				parent_domain->name, child_domain->name);
+		else
+			pr_info("%s has as child subdomain: %s.\n",
+				parent_domain->name, child_domain->name);
+		of_node_put(np);
+	}
+
 	return 0;
 }
 arch_initcall(exynos4_pm_init_power_domain);
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 666ec3e..318d127 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -87,8 +87,8 @@
 static u32 exynos_irqwake_intmask = 0xffffffff;
 
 static const struct exynos_wkup_irq exynos3250_wkup_irq[] = {
-	{ 73, BIT(1) }, /* RTC alarm */
-	{ 74, BIT(2) }, /* RTC tick */
+	{ 105, BIT(1) }, /* RTC alarm */
+	{ 106, BIT(2) }, /* RTC tick */
 	{ /* sentinel */ },
 };
 
@@ -587,7 +587,7 @@
 	.cpu_suspend	= exynos5420_cpu_suspend,
 };
 
-static struct of_device_id exynos_pmu_of_device_ids[] = {
+static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = {
 	{
 		.compatible = "samsung,exynos3250-pmu",
 		.data = &exynos3250_pm_data,
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 07a0957..231fba0 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -169,7 +169,7 @@
 		platform_device_register(&highbank_cpuidle_device);
 }
 
-static const char *highbank_match[] __initconst = {
+static const char *const highbank_match[] __initconst = {
 	"calxeda,highbank",
 	"calxeda,ecx-2000",
 	NULL,
diff --git a/arch/arm/mach-hisi/hisilicon.c b/arch/arm/mach-hisi/hisilicon.c
index 76b9070..c6bd7c7 100644
--- a/arch/arm/mach-hisi/hisilicon.c
+++ b/arch/arm/mach-hisi/hisilicon.c
@@ -45,7 +45,7 @@
 	iotable_init(hi3620_io_desc, ARRAY_SIZE(hi3620_io_desc));
 }
 
-static const char *hi3xxx_compat[] __initconst = {
+static const char *const hi3xxx_compat[] __initconst = {
 	"hisilicon,hi3620-hi4511",
 	NULL,
 };
@@ -55,7 +55,7 @@
 	.dt_compat	= hi3xxx_compat,
 MACHINE_END
 
-static const char *hix5hd2_compat[] __initconst = {
+static const char *const hix5hd2_compat[] __initconst = {
 	"hisilicon,hix5hd2",
 	NULL,
 };
@@ -64,7 +64,7 @@
 	.dt_compat	= hix5hd2_compat,
 MACHINE_END
 
-static const char *hip04_compat[] __initconst = {
+static const char *const hip04_compat[] __initconst = {
 	"hisilicon,hip04-d01",
 	NULL,
 };
@@ -73,7 +73,7 @@
 	.dt_compat	= hip04_compat,
 MACHINE_END
 
-static const char *hip01_compat[] __initconst = {
+static const char *const hip01_compat[] __initconst = {
 	"hisilicon,hip01",
 	"hisilicon,hip01-ca9x2",
 	NULL,
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 4ad6e47..9de3412 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -211,8 +211,9 @@
 	 * set bit IOMUXC_GPR1[21].  Or the PTP clock must be from pad
 	 * (external OSC), and we need to clear the bit.
 	 */
-	clksel = ptp_clk == enet_ref ? IMX6Q_GPR1_ENET_CLK_SEL_ANATOP :
-				       IMX6Q_GPR1_ENET_CLK_SEL_PAD;
+	clksel = clk_is_match(ptp_clk, enet_ref) ?
+				IMX6Q_GPR1_ENET_CLK_SEL_ANATOP :
+				IMX6Q_GPR1_ENET_CLK_SEL_PAD;
 	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
 	if (!IS_ERR(gpr))
 		regmap_update_bits(gpr, IOMUXC_GPR1,
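
The switch from a `==` comparison to clk_is_match() reflects per-user clock handles: two clk_get() calls that resolve to the same hardware clock need not return the same struct clk pointer, so comparing the handles can give a false negative. A hedged sketch of the intended usage; the helper and its name are made up for illustration:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Illustrative helper, not from this patch: true when both consumer IDs
	 * resolve to the same hardware clock, regardless of handle identity. */
	static bool same_hw_clock(struct device *dev, const char *id1, const char *id2)
	{
		struct clk *a = clk_get(dev, id1);
		struct clk *b = clk_get(dev, id2);
		bool match = !IS_ERR(a) && !IS_ERR(b) && clk_is_match(a, b);

		if (!IS_ERR(a))
			clk_put(a);
		if (!IS_ERR(b))
			clk_put(b);
		return match;
	}
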
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index a377f95..0411f06 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -68,7 +68,7 @@
 	return ddr_type;
 }
 
-static struct of_device_id imx_mmdc_dt_ids[] = {
+static const struct of_device_id imx_mmdc_dt_ids[] = {
 	{ .compatible = "fsl,imx6q-mmdc", },
 	{ /* sentinel */ }
 };
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index 6a72286..b0243901 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -245,8 +245,10 @@
 }
 
 #define outsb outsb
-static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count)
+static inline void outsb(u32 io_addr, const void *p, u32 count)
 {
+	const u8 *vaddr = p;
+
 	while (count--)
 		outb(*vaddr++, io_addr);
 }
@@ -262,8 +264,9 @@
 }
 
 #define outsw outsw
-static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count)
+static inline void outsw(u32 io_addr, const void *p, u32 count)
 {
+	const u16 *vaddr = p;
 	while (count--)
 		outw(cpu_to_le16(*vaddr++), io_addr);
 }
@@ -275,8 +278,9 @@
 }
 
 #define outsl outsl
-static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count)
+static inline void outsl(u32 io_addr, const void *p, u32 count)
 {
+	const u32 *vaddr = p;
 	while (count--)
 		outl(cpu_to_le32(*vaddr++), io_addr);
 }
@@ -294,8 +298,9 @@
 }
 
 #define insb insb
-static inline void insb(u32 io_addr, u8 *vaddr, u32 count)
+static inline void insb(u32 io_addr, void *p, u32 count)
 {
+	u8 *vaddr = p;
 	while (count--)
 		*vaddr++ = inb(io_addr);
 }
@@ -313,8 +318,9 @@
 }
 
 #define insw insw
-static inline void insw(u32 io_addr, u16 *vaddr, u32 count)
+static inline void insw(u32 io_addr, void *p, u32 count)
 {
+	u16 *vaddr = p;
 	while (count--)
 		*vaddr++ = le16_to_cpu(inw(io_addr));
 }
@@ -330,8 +336,9 @@
 }
 
 #define insl insl
-static inline void insl(u32 io_addr, u32 *vaddr, u32 count)
+static inline void insl(u32 io_addr, void *p, u32 count)
 {
+	u32 *vaddr = p;
 	while (count--)
 		*vaddr++ = le32_to_cpu(inl(io_addr));
 }
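
Changing the buffer parameter of the string accessors to `void *` / `const void *` lets callers pass buffers of any element type without casts, which is how the generic accessor prototypes are shaped; the cast to the element width then happens once, inside the helper. A plain-C sketch of the pattern, compilable outside the kernel with stand-in I/O calls:

	#include <stdint.h>
	#include <stddef.h>

	static void fake_outw(uint16_t val)
	{
		(void)val;			/* stand-in for the real outw() */
	}

	static void outsw_like(const void *p, size_t count)
	{
		const uint16_t *vaddr = p;	/* implicit conversion; no cast at the call site */

		while (count--)
			fake_outw(*vaddr++);
	}

	int main(void)
	{
		uint16_t buf[4] = { 1, 2, 3, 4 };

		outsw_like(buf, 4);		/* any buffer type, no caller-side cast */
		return 0;
	}
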
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index 7f352de..0662087 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -103,7 +103,7 @@
 	pr_info("Switching to high address space at 0x%llx\n", (u64)offset);
 }
 
-static const char *keystone_match[] __initconst = {
+static const char *const keystone_match[] __initconst = {
 	"ti,keystone",
 	NULL,
 };
diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
index ef6041e..41bebfd 100644
--- a/arch/arm/mach-keystone/pm_domain.c
+++ b/arch/arm/mach-keystone/pm_domain.c
@@ -61,7 +61,7 @@
 	.pm_domain = &keystone_pm_domain,
 };
 
-static struct of_device_id of_keystone_table[] = {
+static const struct of_device_id of_keystone_table[] = {
 	{.compatible = "ti,keystone"},
 	{ /* end of list */ },
 };
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 2756351..10bfa03 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -213,7 +213,7 @@
 }
 
 #ifdef CONFIG_OF
-static struct of_device_id mmp_timer_dt_ids[] = {
+static const struct of_device_id mmp_timer_dt_ids[] = {
 	{ .compatible = "mrvl,mmp-timer", },
 	{}
 };
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 61bfe58..fc83204 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -20,6 +20,7 @@
 #include <linux/input.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <linux/smc91x.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -46,15 +47,20 @@
 	[1] = {
 		.start	= MSM_GPIO_TO_INT(49),
 		.end	= MSM_GPIO_TO_INT(49),
-		.flags	= IORESOURCE_IRQ,
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
 	},
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
 	.name		= "smc91x",
 	.id		= 0,
 	.num_resources	= ARRAY_SIZE(smc91x_resources),
 	.resource	= smc91x_resources,
+	.dev.platform_data = &smc91x_platdata,
 };
 
 static struct platform_device *devices[] __initdata = {
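
The new smc91x_platdata tells the driver how the board wired the chip (bus width, wait-state behaviour). A hedged sketch of the consumer side, assuming the usual platform-data lookup in probe(); the function body is illustrative, not the actual smc91x driver code:

	#include <linux/platform_device.h>
	#include <linux/smc91x.h>

	/* Illustrative probe body, not taken from the real driver. */
	static int example_probe(struct platform_device *pdev)
	{
		struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);

		if (pd && (pd->flags & SMC91X_USE_16BIT)) {
			/* the board asked for 16-bit bus accesses */
		}

		return 0;
	}
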
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 4c74861..10016a3 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -22,6 +22,7 @@
 #include <linux/usb/msm_hsusb.h>
 #include <linux/err.h>
 #include <linux/clkdev.h>
+#include <linux/smc91x.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -49,15 +50,20 @@
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = {
-		.flags = IORESOURCE_IRQ,
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
 	},
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
 	.name           = "smc91x",
 	.id             = 0,
 	.num_resources  = ARRAY_SIZE(smc91x_resources),
 	.resource       = smc91x_resources,
+	.dev.platform_data = &smc91x_platdata,
 };
 
 static int __init msm_init_smc91x(void)
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index b5895f0..e46e9ea 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -51,7 +51,7 @@
 	COHERENCY_FABRIC_TYPE_ARMADA_380,
 };
 
-static struct of_device_id of_coherency_table[] = {
+static const struct of_device_id of_coherency_table[] = {
 	{.compatible = "marvell,coherency-fabric",
 	 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
 	{.compatible = "marvell,armada-375-coherency-fabric",
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index d8ab605..8b9f5e2 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -104,7 +104,7 @@
 
 static void *mvebu_cpu_resume;
 
-static struct of_device_id of_pmsu_table[] = {
+static const struct of_device_id of_pmsu_table[] = {
 	{ .compatible = "marvell,armada-370-pmsu", },
 	{ .compatible = "marvell,armada-370-xp-pmsu", },
 	{ .compatible = "marvell,armada-380-pmsu", },
diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
index a068cb5..c6c132a 100644
--- a/arch/arm/mach-mvebu/system-controller.c
+++ b/arch/arm/mach-mvebu/system-controller.c
@@ -126,7 +126,7 @@
 		return -ENODEV;
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
 void mvebu_armada375_smp_wa_init(void)
 {
 	u32 dev, rev;
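
Tightening the guard from CONFIG_SMP alone to CONFIG_SMP && CONFIG_MACH_MVEBU_V7 keeps this function from being compiled, and its V7-only callees referenced, in configurations that cannot use it. The companion pattern such guards usually pair with, sketched generically (this is not the actual mvebu header, and the name is invented):

	/* Header sketch: declare the real function when it is built,
	 * otherwise provide an empty inline stub so callers still link. */
	#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
	void example_smp_wa_init(void);
	#else
	static inline void example_smp_wa_init(void) { }
	#endif
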
diff --git a/arch/arm/mach-nspire/nspire.c b/arch/arm/mach-nspire/nspire.c
index 3d24ebf..3445a56 100644
--- a/arch/arm/mach-nspire/nspire.c
+++ b/arch/arm/mach-nspire/nspire.c
@@ -27,7 +27,7 @@
 #include "mmio.h"
 #include "clcd.h"
 
-static const char *nspire_dt_match[] __initconst = {
+static const char *const nspire_dt_match[] __initconst = {
 	"ti,nspire",
 	"ti,nspire-cx",
 	"ti,nspire-tp",
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 00d5d8f..b83f18f 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -190,7 +190,7 @@
 obj-$(CONFIG_ARCH_OMAP3)		+= $(clock-common) clock3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)		+= clock34xx.o clkt34xx_dpll3m2.o
 obj-$(CONFIG_ARCH_OMAP3)		+= clock3517.o clock36xx.o
-obj-$(CONFIG_ARCH_OMAP3)		+= dpll3xxx.o cclock3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= dpll3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)		+= clkt_iclk.o
 obj-$(CONFIG_ARCH_OMAP4)		+= $(clock-common)
 obj-$(CONFIG_ARCH_OMAP4)		+= dpll3xxx.o dpll44xx.o
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
deleted file mode 100644
index e79c80b..0000000
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ /dev/null
@@ -1,3688 +0,0 @@
-/*
- * OMAP3 clock data
- *
- * Copyright (C) 2007-2012 Texas Instruments, Inc.
- * Copyright (C) 2007-2011 Nokia Corporation
- *
- * Written by Paul Walmsley
- * Updated to COMMON clk data format by Rajendra Nayak <rnayak@ti.com>
- * With many device clock fixes by Kevin Hilman and Jouni Högander
- * DPLL bypass clock support added by Roman Tereshonkov
- *
- */
-
-/*
- * Virtual clocks are introduced as convenient tools.
- * They are sources for other clocks and not supposed
- * to be requested from drivers directly.
- */
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/clk-private.h>
-#include <linux/list.h>
-#include <linux/io.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "clock3xxx.h"
-#include "clock34xx.h"
-#include "clock36xx.h"
-#include "clock3517.h"
-#include "cm3xxx.h"
-#include "cm-regbits-34xx.h"
-#include "prm3xxx.h"
-#include "prm-regbits-34xx.h"
-#include "control.h"
-
-/*
- * clocks
- */
-
-#define OMAP_CM_REGADDR		OMAP34XX_CM_REGADDR
-
-/* Maximum DPLL multiplier, divider values for OMAP3 */
-#define OMAP3_MAX_DPLL_MULT		2047
-#define OMAP3630_MAX_JTYPE_DPLL_MULT	4095
-#define OMAP3_MAX_DPLL_DIV		128
-
-DEFINE_CLK_FIXED_RATE(dummy_apb_pclk, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(omap_32k_fck, CLK_IS_ROOT, 32768, 0x0);
-
-DEFINE_CLK_FIXED_RATE(pclk_ck, CLK_IS_ROOT, 27000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(rmii_ck, CLK_IS_ROOT, 50000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(secure_32k_fck, CLK_IS_ROOT, 32768, 0x0);
-
-DEFINE_CLK_FIXED_RATE(sys_altclk, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_12m_ck, CLK_IS_ROOT, 12000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_13m_ck, CLK_IS_ROOT, 13000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_16_8m_ck, CLK_IS_ROOT, 16800000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_38_4m_ck, CLK_IS_ROOT, 38400000, 0x0);
-
-static const char *osc_sys_ck_parent_names[] = {
-	"virt_12m_ck", "virt_13m_ck", "virt_19200000_ck", "virt_26000000_ck",
-	"virt_38_4m_ck", "virt_16_8m_ck",
-};
-
-DEFINE_CLK_MUX(osc_sys_ck, osc_sys_ck_parent_names, NULL, 0x0,
-	       OMAP3430_PRM_CLKSEL, OMAP3430_SYS_CLKIN_SEL_SHIFT,
-	       OMAP3430_SYS_CLKIN_SEL_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_DIVIDER(sys_ck, "osc_sys_ck", &osc_sys_ck, 0x0,
-		   OMAP3430_PRM_CLKSRC_CTRL, OMAP_SYSCLKDIV_SHIFT,
-		   OMAP_SYSCLKDIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct dpll_data dpll3_dd = {
-	.mult_div1_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-	.mult_mask	= OMAP3430_CORE_DPLL_MULT_MASK,
-	.div1_mask	= OMAP3430_CORE_DPLL_DIV_MASK,
-	.clk_bypass	= &sys_ck,
-	.clk_ref	= &sys_ck,
-	.freqsel_mask	= OMAP3430_CORE_DPLL_FREQSEL_MASK,
-	.control_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-	.enable_mask	= OMAP3430_EN_CORE_DPLL_MASK,
-	.auto_recal_bit	= OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT,
-	.recal_en_bit	= OMAP3430_CORE_DPLL_RECAL_EN_SHIFT,
-	.recal_st_bit	= OMAP3430_CORE_DPLL_ST_SHIFT,
-	.autoidle_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-	.autoidle_mask	= OMAP3430_AUTO_CORE_DPLL_MASK,
-	.idlest_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-	.idlest_mask	= OMAP3430_ST_CORE_CLK_MASK,
-	.max_multiplier	= OMAP3_MAX_DPLL_MULT,
-	.min_divider	= 1,
-	.max_divider	= OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll3_ck;
-
-static const char *dpll3_ck_parent_names[] = {
-	"sys_ck",
-	"sys_ck",
-};
-
-static const struct clk_ops dpll3_ck_ops = {
-	.init		= &omap2_init_clk_clkdm,
-	.get_parent	= &omap2_init_dpll_parent,
-	.recalc_rate	= &omap3_dpll_recalc,
-	.round_rate	= &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll3_ck_hw = {
-	.hw = {
-		.clk = &dpll3_ck,
-	},
-	.ops		= &clkhwops_omap3_dpll,
-	.dpll_data	= &dpll3_dd,
-	.clkdm_name	= "dpll3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll3_ck, dpll3_ck_parent_names, dpll3_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll3_m2_ck, "dpll3_ck", &dpll3_ck, 0x0,
-		   OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-		   OMAP3430_CORE_DPLL_CLKOUT_DIV_SHIFT,
-		   OMAP3430_CORE_DPLL_CLKOUT_DIV_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk core_ck;
-
-static const char *core_ck_parent_names[] = {
-	"dpll3_m2_ck",
-};
-
-static const struct clk_ops core_ck_ops = {};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_ck, NULL);
-DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops);
-
-DEFINE_CLK_DIVIDER(l3_ick, "core_ck", &core_ck, 0x0,
-		   OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-		   OMAP3430_CLKSEL_L3_SHIFT, OMAP3430_CLKSEL_L3_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(l4_ick, "l3_ick", &l3_ick, 0x0,
-		   OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-		   OMAP3430_CLKSEL_L4_SHIFT, OMAP3430_CLKSEL_L4_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk security_l4_ick2;
-
-static const char *security_l4_ick2_parent_names[] = {
-	"l4_ick",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(security_l4_ick2, NULL);
-DEFINE_STRUCT_CLK(security_l4_ick2, security_l4_ick2_parent_names, core_ck_ops);
-
-static struct clk aes1_ick;
-
-static const char *aes1_ick_parent_names[] = {
-	"security_l4_ick2",
-};
-
-static const struct clk_ops aes1_ick_ops = {
-	.enable		= &omap2_dflt_clk_enable,
-	.disable	= &omap2_dflt_clk_disable,
-	.is_enabled	= &omap2_dflt_clk_is_enabled,
-};
-
-static struct clk_hw_omap aes1_ick_hw = {
-	.hw = {
-		.clk = &aes1_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-	.enable_bit	= OMAP3430_EN_AES1_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(aes1_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk core_l4_ick;
-
-static const struct clk_ops core_l4_ick_ops = {
-	.init		= &omap2_init_clk_clkdm,
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk aes2_ick;
-
-static const char *aes2_ick_parent_names[] = {
-	"core_l4_ick",
-};
-
-static const struct clk_ops aes2_ick_ops = {
-	.init		= &omap2_init_clk_clkdm,
-	.enable		= &omap2_dflt_clk_enable,
-	.disable	= &omap2_dflt_clk_disable,
-	.is_enabled	= &omap2_dflt_clk_is_enabled,
-};
-
-static struct clk_hw_omap aes2_ick_hw = {
-	.hw = {
-		.clk = &aes2_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_AES2_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(aes2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk dpll1_fck;
-
-static struct dpll_data dpll1_dd = {
-	.mult_div1_reg	= OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
-	.mult_mask	= OMAP3430_MPU_DPLL_MULT_MASK,
-	.div1_mask	= OMAP3430_MPU_DPLL_DIV_MASK,
-	.clk_bypass	= &dpll1_fck,
-	.clk_ref	= &sys_ck,
-	.freqsel_mask	= OMAP3430_MPU_DPLL_FREQSEL_MASK,
-	.control_reg	= OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL),
-	.enable_mask	= OMAP3430_EN_MPU_DPLL_MASK,
-	.modes		= (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-	.auto_recal_bit	= OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT,
-	.recal_en_bit	= OMAP3430_MPU_DPLL_RECAL_EN_SHIFT,
-	.recal_st_bit	= OMAP3430_MPU_DPLL_ST_SHIFT,
-	.autoidle_reg	= OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL),
-	.autoidle_mask	= OMAP3430_AUTO_MPU_DPLL_MASK,
-	.idlest_reg	= OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
-	.idlest_mask	= OMAP3430_ST_MPU_CLK_MASK,
-	.max_multiplier	= OMAP3_MAX_DPLL_MULT,
-	.min_divider	= 1,
-	.max_divider	= OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll1_ck;
-
-static const struct clk_ops dpll1_ck_ops = {
-	.init		= &omap2_init_clk_clkdm,
-	.enable		= &omap3_noncore_dpll_enable,
-	.disable	= &omap3_noncore_dpll_disable,
-	.get_parent	= &omap2_init_dpll_parent,
-	.recalc_rate	= &omap3_dpll_recalc,
-	.set_rate	= &omap3_noncore_dpll_set_rate,
-	.set_parent	= &omap3_noncore_dpll_set_parent,
-	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
-	.determine_rate	= &omap3_noncore_dpll_determine_rate,
-	.round_rate	= &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll1_ck_hw = {
-	.hw = {
-		.clk = &dpll1_ck,
-	},
-	.ops		= &clkhwops_omap3_dpll,
-	.dpll_data	= &dpll1_dd,
-	.clkdm_name	= "dpll1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll1_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_FIXED_FACTOR(dpll1_x2_ck, "dpll1_ck", &dpll1_ck, 0x0, 2, 1);
-
-DEFINE_CLK_DIVIDER(dpll1_x2m2_ck, "dpll1_x2_ck", &dpll1_x2_ck, 0x0,
-		   OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL),
-		   OMAP3430_MPU_DPLL_CLKOUT_DIV_SHIFT,
-		   OMAP3430_MPU_DPLL_CLKOUT_DIV_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk mpu_ck;
-
-static const char *mpu_ck_parent_names[] = {
-	"dpll1_x2m2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(mpu_ck, "mpu_clkdm");
-DEFINE_STRUCT_CLK(mpu_ck, mpu_ck_parent_names, core_l4_ick_ops);
-
-DEFINE_CLK_DIVIDER(arm_fck, "mpu_ck", &mpu_ck, 0x0,
-		   OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
-		   OMAP3430_ST_MPU_CLK_SHIFT, OMAP3430_ST_MPU_CLK_WIDTH,
-		   0x0, NULL);
-
-static struct clk cam_ick;
-
-static struct clk_hw_omap cam_ick_hw = {
-	.hw = {
-		.clk = &cam_ick,
-	},
-	.ops		= &clkhwops_iclk,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_CAM_SHIFT,
-	.clkdm_name	= "cam_clkdm",
-};
-
-DEFINE_STRUCT_CLK(cam_ick, security_l4_ick2_parent_names, aes2_ick_ops);
-
-/* DPLL4 */
-/* Supplies 96MHz, 54Mhz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */
-/* Type: DPLL */
-static struct dpll_data dpll4_dd;
-
-static struct dpll_data dpll4_dd_34xx __initdata = {
-	.mult_div1_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
-	.mult_mask	= OMAP3430_PERIPH_DPLL_MULT_MASK,
-	.div1_mask	= OMAP3430_PERIPH_DPLL_DIV_MASK,
-	.clk_bypass	= &sys_ck,
-	.clk_ref	= &sys_ck,
-	.freqsel_mask	= OMAP3430_PERIPH_DPLL_FREQSEL_MASK,
-	.control_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-	.enable_mask	= OMAP3430_EN_PERIPH_DPLL_MASK,
-	.modes		= (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-	.auto_recal_bit	= OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
-	.recal_en_bit	= OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
-	.recal_st_bit	= OMAP3430_PERIPH_DPLL_ST_SHIFT,
-	.autoidle_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-	.autoidle_mask	= OMAP3430_AUTO_PERIPH_DPLL_MASK,
-	.idlest_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-	.idlest_mask	= OMAP3430_ST_PERIPH_CLK_MASK,
-	.max_multiplier = OMAP3_MAX_DPLL_MULT,
-	.min_divider	= 1,
-	.max_divider	= OMAP3_MAX_DPLL_DIV,
-};
-
-static struct dpll_data dpll4_dd_3630 __initdata = {
-	.mult_div1_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
-	.mult_mask	= OMAP3630_PERIPH_DPLL_MULT_MASK,
-	.div1_mask	= OMAP3430_PERIPH_DPLL_DIV_MASK,
-	.clk_bypass	= &sys_ck,
-	.clk_ref	= &sys_ck,
-	.control_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-	.enable_mask	= OMAP3430_EN_PERIPH_DPLL_MASK,
-	.modes		= (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-	.auto_recal_bit	= OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
-	.recal_en_bit	= OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
-	.recal_st_bit	= OMAP3430_PERIPH_DPLL_ST_SHIFT,
-	.autoidle_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-	.autoidle_mask	= OMAP3430_AUTO_PERIPH_DPLL_MASK,
-	.idlest_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-	.idlest_mask	= OMAP3430_ST_PERIPH_CLK_MASK,
-	.dco_mask	= OMAP3630_PERIPH_DPLL_DCO_SEL_MASK,
-	.sddiv_mask	= OMAP3630_PERIPH_DPLL_SD_DIV_MASK,
-	.max_multiplier = OMAP3630_MAX_JTYPE_DPLL_MULT,
-	.min_divider	= 1,
-	.max_divider	= OMAP3_MAX_DPLL_DIV,
-	.flags		= DPLL_J_TYPE
-};
-
-static struct clk dpll4_ck;
-
-static const struct clk_ops dpll4_ck_ops = {
-	.init		= &omap2_init_clk_clkdm,
-	.enable		= &omap3_noncore_dpll_enable,
-	.disable	= &omap3_noncore_dpll_disable,
-	.get_parent	= &omap2_init_dpll_parent,
-	.recalc_rate	= &omap3_dpll_recalc,
-	.set_rate	= &omap3_dpll4_set_rate,
-	.set_parent	= &omap3_noncore_dpll_set_parent,
-	.set_rate_and_parent	= &omap3_dpll4_set_rate_and_parent,
-	.determine_rate	= &omap3_noncore_dpll_determine_rate,
-	.round_rate	= &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll4_ck_hw = {
-	.hw = {
-		.clk = &dpll4_ck,
-	},
-	.dpll_data	= &dpll4_dd,
-	.ops		= &clkhwops_omap3_dpll,
-	.clkdm_name	= "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_ck, dpll3_ck_parent_names, dpll4_ck_ops);
-
-static const struct clk_div_table dpll4_mx_ck_div_table[] = {
-	{ .div = 1, .val = 1 },
-	{ .div = 2, .val = 2 },
-	{ .div = 3, .val = 3 },
-	{ .div = 4, .val = 4 },
-	{ .div = 5, .val = 5 },
-	{ .div = 6, .val = 6 },
-	{ .div = 7, .val = 7 },
-	{ .div = 8, .val = 8 },
-	{ .div = 9, .val = 9 },
-	{ .div = 10, .val = 10 },
-	{ .div = 11, .val = 11 },
-	{ .div = 12, .val = 12 },
-	{ .div = 13, .val = 13 },
-	{ .div = 14, .val = 14 },
-	{ .div = 15, .val = 15 },
-	{ .div = 16, .val = 16 },
-	{ .div = 17, .val = 17 },
-	{ .div = 18, .val = 18 },
-	{ .div = 19, .val = 19 },
-	{ .div = 20, .val = 20 },
-	{ .div = 21, .val = 21 },
-	{ .div = 22, .val = 22 },
-	{ .div = 23, .val = 23 },
-	{ .div = 24, .val = 24 },
-	{ .div = 25, .val = 25 },
-	{ .div = 26, .val = 26 },
-	{ .div = 27, .val = 27 },
-	{ .div = 28, .val = 28 },
-	{ .div = 29, .val = 29 },
-	{ .div = 30, .val = 30 },
-	{ .div = 31, .val = 31 },
-	{ .div = 32, .val = 32 },
-	{ .div = 0 },
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m5_ck, "dpll4_ck", &dpll4_ck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
-		   OMAP3430_CLKSEL_CAM_SHIFT, OMAP3630_CLKSEL_CAM_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m5x2_ck;
-
-static const char *dpll4_m5x2_ck_parent_names[] = {
-	"dpll4_m5_ck",
-};
-
-static const struct clk_ops dpll4_m5x2_ck_ops = {
-	.init		= &omap2_init_clk_clkdm,
-	.enable		= &omap2_dflt_clk_enable,
-	.disable	= &omap2_dflt_clk_disable,
-	.is_enabled	= &omap2_dflt_clk_is_enabled,
-	.set_rate	= &omap3_clkoutx2_set_rate,
-	.recalc_rate	= &omap3_clkoutx2_recalc,
-	.round_rate	= &omap3_clkoutx2_round_rate,
-};
-
-static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
-	.init		= &omap2_init_clk_clkdm,
-	.enable		= &omap36xx_pwrdn_clk_enable_with_hsdiv_restore,
-	.disable	= &omap2_dflt_clk_disable,
-	.recalc_rate	= &omap3_clkoutx2_recalc,
-};
-
-static struct clk_hw_omap dpll4_m5x2_ck_hw = {
-	.hw = {
-		.clk = &dpll4_m5x2_ck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-	.enable_bit	= OMAP3430_PWRDN_CAM_SHIFT,
-	.flags		= INVERT_ENABLE,
-	.clkdm_name	= "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
-			dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
-
-static struct clk dpll4_m5x2_ck_3630 = {
-	.name		= "dpll4_m5x2_ck",
-	.hw		= &dpll4_m5x2_ck_hw.hw,
-	.parent_names	= dpll4_m5x2_ck_parent_names,
-	.num_parents	= ARRAY_SIZE(dpll4_m5x2_ck_parent_names),
-	.ops		= &dpll4_m5x2_ck_3630_ops,
-	.flags		= CLK_SET_RATE_PARENT,
-};
-
-static struct clk cam_mclk;
-
-static const char *cam_mclk_parent_names[] = {
-	"dpll4_m5x2_ck",
-};
-
-static struct clk_hw_omap cam_mclk_hw = {
-	.hw = {
-		.clk = &cam_mclk,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_CAM_SHIFT,
-	.clkdm_name	= "cam_clkdm",
-};
-
-static struct clk cam_mclk = {
-	.name		= "cam_mclk",
-	.hw		= &cam_mclk_hw.hw,
-	.parent_names	= cam_mclk_parent_names,
-	.num_parents	= ARRAY_SIZE(cam_mclk_parent_names),
-	.ops		= &aes2_ick_ops,
-	.flags		= CLK_SET_RATE_PARENT,
-};
-
-static const struct clksel_rate clkout2_src_core_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_sys_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_96m_rates[] = {
-	{ .div = 1, .val = 2, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m2_ck, "dpll4_ck", &dpll4_ck, 0x0,
-		   OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
-		   OMAP3430_DIV_96M_SHIFT, OMAP3630_DIV_96M_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m2x2_ck;
-
-static const char *dpll4_m2x2_ck_parent_names[] = {
-	"dpll4_m2_ck",
-};
-
-static struct clk_hw_omap dpll4_m2x2_ck_hw = {
-	.hw = {
-		.clk = &dpll4_m2x2_ck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-	.enable_bit	= OMAP3430_PWRDN_96M_SHIFT,
-	.flags		= INVERT_ENABLE,
-	.clkdm_name	= "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m2x2_ck, dpll4_m2x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m2x2_ck_3630 = {
-	.name		= "dpll4_m2x2_ck",
-	.hw		= &dpll4_m2x2_ck_hw.hw,
-	.parent_names	= dpll4_m2x2_ck_parent_names,
-	.num_parents	= ARRAY_SIZE(dpll4_m2x2_ck_parent_names),
-	.ops		= &dpll4_m5x2_ck_3630_ops,
-};
-
-static struct clk omap_96m_alwon_fck;
-
-static const char *omap_96m_alwon_fck_parent_names[] = {
-	"dpll4_m2x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(omap_96m_alwon_fck, NULL);
-DEFINE_STRUCT_CLK(omap_96m_alwon_fck, omap_96m_alwon_fck_parent_names,
-		  core_ck_ops);
-
-static struct clk cm_96m_fck;
-
-static const char *cm_96m_fck_parent_names[] = {
-	"omap_96m_alwon_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(cm_96m_fck, NULL);
-DEFINE_STRUCT_CLK(cm_96m_fck, cm_96m_fck_parent_names, core_ck_ops);
-
-static const struct clksel_rate clkout2_src_54m_rates[] = {
-	{ .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-DEFINE_CLK_DIVIDER_TABLE(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-		   OMAP3430_CLKSEL_TV_SHIFT, OMAP3630_CLKSEL_TV_WIDTH,
-		   0, dpll4_mx_ck_div_table, NULL);
-
-static struct clk dpll4_m3x2_ck;
-
-static const char *dpll4_m3x2_ck_parent_names[] = {
-	"dpll4_m3_ck",
-};
-
-static struct clk_hw_omap dpll4_m3x2_ck_hw = {
-	.hw = {
-		.clk = &dpll4_m3x2_ck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-	.enable_bit	= OMAP3430_PWRDN_TV_SHIFT,
-	.flags		= INVERT_ENABLE,
-	.clkdm_name	= "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m3x2_ck, dpll4_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m3x2_ck_3630 = {
-	.name		= "dpll4_m3x2_ck",
-	.hw		= &dpll4_m3x2_ck_hw.hw,
-	.parent_names	= dpll4_m3x2_ck_parent_names,
-	.num_parents	= ARRAY_SIZE(dpll4_m3x2_ck_parent_names),
-	.ops		= &dpll4_m5x2_ck_3630_ops,
-};
-
-static const char *omap_54m_fck_parent_names[] = {
-	"dpll4_m3x2_ck", "sys_altclk",
-};
-
-DEFINE_CLK_MUX(omap_54m_fck, omap_54m_fck_parent_names, NULL, 0x0,
-	       OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP3430_SOURCE_54M_SHIFT,
-	       OMAP3430_SOURCE_54M_WIDTH, 0x0, NULL);
-
-static const struct clksel clkout2_src_clksel[] = {
-	{ .parent = &core_ck, .rates = clkout2_src_core_rates },
-	{ .parent = &sys_ck, .rates = clkout2_src_sys_rates },
-	{ .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates },
-	{ .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates },
-	{ .parent = NULL },
-};
-
-static const char *clkout2_src_ck_parent_names[] = {
-	"core_ck", "sys_ck", "cm_96m_fck", "omap_54m_fck",
-};
-
-static const struct clk_ops clkout2_src_ck_ops = {
-	.init		= &omap2_init_clk_clkdm,
-	.enable		= &omap2_dflt_clk_enable,
-	.disable	= &omap2_dflt_clk_disable,
-	.is_enabled	= &omap2_dflt_clk_is_enabled,
-	.recalc_rate	= &omap2_clksel_recalc,
-	.get_parent	= &omap2_clksel_find_parent_index,
-	.set_parent	= &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(clkout2_src_ck, "core_clkdm",
-			 clkout2_src_clksel, OMAP3430_CM_CLKOUT_CTRL,
-			 OMAP3430_CLKOUT2SOURCE_MASK,
-			 OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_EN_SHIFT,
-			 NULL, clkout2_src_ck_parent_names, clkout2_src_ck_ops);
-
-static const struct clksel_rate omap_48m_cm96m_rates[] = {
-	{ .div = 2, .val = 0, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel_rate omap_48m_alt_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel omap_48m_clksel[] = {
-	{ .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates },
-	{ .parent = &sys_altclk, .rates = omap_48m_alt_rates },
-	{ .parent = NULL },
-};
-
-static const char *omap_48m_fck_parent_names[] = {
-	"cm_96m_fck", "sys_altclk",
-};
-
-static struct clk omap_48m_fck;
-
-static const struct clk_ops omap_48m_fck_ops = {
-	.recalc_rate	= &omap2_clksel_recalc,
-	.get_parent	= &omap2_clksel_find_parent_index,
-	.set_parent	= &omap2_clksel_set_parent,
-};
-
-static struct clk_hw_omap omap_48m_fck_hw = {
-	.hw = {
-		.clk = &omap_48m_fck,
-	},
-	.clksel		= omap_48m_clksel,
-	.clksel_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-	.clksel_mask	= OMAP3430_SOURCE_48M_MASK,
-};
-
-DEFINE_STRUCT_CLK(omap_48m_fck, omap_48m_fck_parent_names, omap_48m_fck_ops);
-
-DEFINE_CLK_FIXED_FACTOR(omap_12m_fck, "omap_48m_fck", &omap_48m_fck, 0x0, 1, 4);
-
-static struct clk core_12m_fck;
-
-static const char *core_12m_fck_parent_names[] = {
-	"omap_12m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_12m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_12m_fck, core_12m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk core_48m_fck;
-
-static const char *core_48m_fck_parent_names[] = {
-	"omap_48m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_48m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);
-
-static const char *omap_96m_fck_parent_names[] = {
-	"cm_96m_fck", "sys_ck",
-};
-
-DEFINE_CLK_MUX(omap_96m_fck, omap_96m_fck_parent_names, NULL, 0x0,
-	       OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-	       OMAP3430_SOURCE_96M_SHIFT, OMAP3430_SOURCE_96M_WIDTH, 0x0, NULL);
-
-static struct clk core_96m_fck;
-
-static const char *core_96m_fck_parent_names[] = {
-	"omap_96m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_96m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_96m_fck, core_96m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk core_l3_ick;
-
-static const char *core_l3_ick_parent_names[] = {
-	"l3_ick",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_l3_ick, "core_l3_clkdm");
-DEFINE_STRUCT_CLK(core_l3_ick, core_l3_ick_parent_names, core_l4_ick_ops);
-
-DEFINE_CLK_FIXED_FACTOR(dpll3_m2x2_ck, "dpll3_m2_ck", &dpll3_m2_ck, 0x0, 2, 1);
-
-static struct clk corex2_fck;
-
-static const char *corex2_fck_parent_names[] = {
-	"dpll3_m2x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL);
-DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops);
-
-static const char *cpefuse_fck_parent_names[] = {
-	"sys_ck",
-};
-
-static struct clk cpefuse_fck;
-
-static struct clk_hw_omap cpefuse_fck_hw = {
-	.hw = {
-		.clk = &cpefuse_fck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-	.enable_bit	= OMAP3430ES2_EN_CPEFUSE_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(cpefuse_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk csi2_96m_fck;
-
-static const char *csi2_96m_fck_parent_names[] = {
-	"core_96m_fck",
-};
-
-static struct clk_hw_omap csi2_96m_fck_hw = {
-	.hw = {
-		.clk = &csi2_96m_fck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_CSI2_SHIFT,
-	.clkdm_name	= "cam_clkdm",
-};
-
-DEFINE_STRUCT_CLK(csi2_96m_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk d2d_26m_fck;
-
-static struct clk_hw_omap d2d_26m_fck_hw = {
-	.hw = {
-		.clk = &d2d_26m_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430ES1_EN_D2D_SHIFT,
-	.clkdm_name	= "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(d2d_26m_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk des1_ick;
-
-static struct clk_hw_omap des1_ick_hw = {
-	.hw = {
-		.clk = &des1_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-	.enable_bit	= OMAP3430_EN_DES1_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(des1_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk des2_ick;
-
-static struct clk_hw_omap des2_ick_hw = {
-	.hw = {
-		.clk = &des2_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_DES2_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(des2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_DIVIDER(dpll1_fck, "core_ck", &core_ck, 0x0,
-		   OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
-		   OMAP3430_MPU_CLK_SRC_SHIFT, OMAP3430_MPU_CLK_SRC_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll2_fck;
-
-static struct dpll_data dpll2_dd = {
-	.mult_div1_reg	= OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
-	.mult_mask	= OMAP3430_IVA2_DPLL_MULT_MASK,
-	.div1_mask	= OMAP3430_IVA2_DPLL_DIV_MASK,
-	.clk_bypass	= &dpll2_fck,
-	.clk_ref	= &sys_ck,
-	.freqsel_mask	= OMAP3430_IVA2_DPLL_FREQSEL_MASK,
-	.control_reg	= OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL),
-	.enable_mask	= OMAP3430_EN_IVA2_DPLL_MASK,
-	.modes		= ((1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) |
-			   (1 << DPLL_LOW_POWER_BYPASS)),
-	.auto_recal_bit	= OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT,
-	.recal_en_bit	= OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT,
-	.recal_st_bit	= OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT,
-	.autoidle_reg	= OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL),
-	.autoidle_mask	= OMAP3430_AUTO_IVA2_DPLL_MASK,
-	.idlest_reg	= OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL),
-	.idlest_mask	= OMAP3430_ST_IVA2_CLK_MASK,
-	.max_multiplier	= OMAP3_MAX_DPLL_MULT,
-	.min_divider	= 1,
-	.max_divider	= OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll2_ck;
-
-static struct clk_hw_omap dpll2_ck_hw = {
-	.hw = {
-		.clk = &dpll2_ck,
-	},
-	.ops		= &clkhwops_omap3_dpll,
-	.dpll_data	= &dpll2_dd,
-	.clkdm_name	= "dpll2_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll2_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll2_fck, "core_ck", &core_ck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
-		   OMAP3430_IVA2_CLK_SRC_SHIFT, OMAP3430_IVA2_CLK_SRC_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(dpll2_m2_ck, "dpll2_ck", &dpll2_ck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL2_PLL),
-		   OMAP3430_IVA2_DPLL_CLKOUT_DIV_SHIFT,
-		   OMAP3430_IVA2_DPLL_CLKOUT_DIV_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(dpll3_m3_ck, "dpll3_ck", &dpll3_ck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-		   OMAP3430_DIV_DPLL3_SHIFT, OMAP3430_DIV_DPLL3_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll3_m3x2_ck;
-
-static const char *dpll3_m3x2_ck_parent_names[] = {
-	"dpll3_m3_ck",
-};
-
-static struct clk_hw_omap dpll3_m3x2_ck_hw = {
-	.hw = {
-		.clk = &dpll3_m3x2_ck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-	.enable_bit	= OMAP3430_PWRDN_EMU_CORE_SHIFT,
-	.flags		= INVERT_ENABLE,
-	.clkdm_name	= "dpll3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll3_m3x2_ck, dpll3_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll3_m3x2_ck_3630 = {
-	.name		= "dpll3_m3x2_ck",
-	.hw		= &dpll3_m3x2_ck_hw.hw,
-	.parent_names	= dpll3_m3x2_ck_parent_names,
-	.num_parents	= ARRAY_SIZE(dpll3_m3x2_ck_parent_names),
-	.ops		= &dpll4_m5x2_ck_3630_ops,
-};
-
-DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck, 0x0, 2, 1);
-
-DEFINE_CLK_DIVIDER_TABLE(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-		   OMAP3430_CLKSEL_DSS1_SHIFT, OMAP3630_CLKSEL_DSS1_WIDTH,
-		   0, dpll4_mx_ck_div_table, NULL);
-
-static struct clk dpll4_m4x2_ck;
-
-static const char *dpll4_m4x2_ck_parent_names[] = {
-	"dpll4_m4_ck",
-};
-
-static struct clk_hw_omap dpll4_m4x2_ck_hw = {
-	.hw = {
-		.clk = &dpll4_m4x2_ck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-	.enable_bit	= OMAP3430_PWRDN_DSS1_SHIFT,
-	.flags		= INVERT_ENABLE,
-	.clkdm_name	= "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names,
-		dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
-
-static struct clk dpll4_m4x2_ck_3630 = {
-	.name		= "dpll4_m4x2_ck",
-	.hw		= &dpll4_m4x2_ck_hw.hw,
-	.parent_names	= dpll4_m4x2_ck_parent_names,
-	.num_parents	= ARRAY_SIZE(dpll4_m4x2_ck_parent_names),
-	.ops		= &dpll4_m5x2_ck_3630_ops,
-	.flags		= CLK_SET_RATE_PARENT,
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-		   OMAP3430_DIV_DPLL4_SHIFT, OMAP3630_DIV_DPLL4_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m6x2_ck;
-
-static const char *dpll4_m6x2_ck_parent_names[] = {
-	"dpll4_m6_ck",
-};
-
-static struct clk_hw_omap dpll4_m6x2_ck_hw = {
-	.hw = {
-		.clk = &dpll4_m6x2_ck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-	.enable_bit	= OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
-	.flags		= INVERT_ENABLE,
-	.clkdm_name	= "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m6x2_ck, dpll4_m6x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m6x2_ck_3630 = {
-	.name		= "dpll4_m6x2_ck",
-	.hw		= &dpll4_m6x2_ck_hw.hw,
-	.parent_names	= dpll4_m6x2_ck_parent_names,
-	.num_parents	= ARRAY_SIZE(dpll4_m6x2_ck_parent_names),
-	.ops		= &dpll4_m5x2_ck_3630_ops,
-};
-
-DEFINE_CLK_FIXED_FACTOR(dpll4_x2_ck, "dpll4_ck", &dpll4_ck, 0x0, 2, 1);
-
-static struct dpll_data dpll5_dd = {
-	.mult_div1_reg	= OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4),
-	.mult_mask	= OMAP3430ES2_PERIPH2_DPLL_MULT_MASK,
-	.div1_mask	= OMAP3430ES2_PERIPH2_DPLL_DIV_MASK,
-	.clk_bypass	= &sys_ck,
-	.clk_ref	= &sys_ck,
-	.freqsel_mask	= OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK,
-	.control_reg	= OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2),
-	.enable_mask	= OMAP3430ES2_EN_PERIPH2_DPLL_MASK,
-	.modes		= (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-	.auto_recal_bit	= OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT,
-	.recal_en_bit	= OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT,
-	.recal_st_bit	= OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT,
-	.autoidle_reg	= OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL),
-	.autoidle_mask	= OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK,
-	.idlest_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2),
-	.idlest_mask	= OMAP3430ES2_ST_PERIPH2_CLK_MASK,
-	.max_multiplier	= OMAP3_MAX_DPLL_MULT,
-	.min_divider	= 1,
-	.max_divider	= OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll5_ck;
-
-static struct clk_hw_omap dpll5_ck_hw = {
-	.hw = {
-		.clk = &dpll5_ck,
-	},
-	.ops		= &clkhwops_omap3_dpll,
-	.dpll_data	= &dpll5_dd,
-	.clkdm_name	= "dpll5_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll5_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll5_m2_ck, "dpll5_ck", &dpll5_ck, 0x0,
-		   OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5),
-		   OMAP3430ES2_DIV_120M_SHIFT, OMAP3430ES2_DIV_120M_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dss1_alwon_fck_3430es1;
-
-static const char *dss1_alwon_fck_3430es1_parent_names[] = {
-	"dpll4_m4x2_ck",
-};
-
-static struct clk_hw_omap dss1_alwon_fck_3430es1_hw = {
-	.hw = {
-		.clk = &dss1_alwon_fck_3430es1,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_DSS1_SHIFT,
-	.clkdm_name	= "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es1,
-		dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
-		CLK_SET_RATE_PARENT);
-
-static struct clk dss1_alwon_fck_3430es2;
-
-static struct clk_hw_omap dss1_alwon_fck_3430es2_hw = {
-	.hw = {
-		.clk = &dss1_alwon_fck_3430es2,
-	},
-	.ops		= &clkhwops_omap3430es2_dss_usbhost_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_DSS1_SHIFT,
-	.clkdm_name	= "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es2,
-		dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
-		CLK_SET_RATE_PARENT);
-
-static struct clk dss2_alwon_fck;
-
-static struct clk_hw_omap dss2_alwon_fck_hw = {
-	.hw = {
-		.clk = &dss2_alwon_fck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_DSS2_SHIFT,
-	.clkdm_name	= "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss2_alwon_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk dss_96m_fck;
-
-static struct clk_hw_omap dss_96m_fck_hw = {
-	.hw = {
-		.clk = &dss_96m_fck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_TV_SHIFT,
-	.clkdm_name	= "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_96m_fck, core_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk dss_ick_3430es1;
-
-static struct clk_hw_omap dss_ick_3430es1_hw = {
-	.hw = {
-		.clk = &dss_ick_3430es1,
-	},
-	.ops		= &clkhwops_iclk,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
-	.clkdm_name	= "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_ick_3430es1, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk dss_ick_3430es2;
-
-static struct clk_hw_omap dss_ick_3430es2_hw = {
-	.hw = {
-		.clk = &dss_ick_3430es2,
-	},
-	.ops		= &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
-	.clkdm_name	= "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_ick_3430es2, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk dss_tv_fck;
-
-static const char *dss_tv_fck_parent_names[] = {
-	"omap_54m_fck",
-};
-
-static struct clk_hw_omap dss_tv_fck_hw = {
-	.hw = {
-		.clk = &dss_tv_fck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_TV_SHIFT,
-	.clkdm_name	= "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_tv_fck, dss_tv_fck_parent_names, aes2_ick_ops);
-
-static struct clk emac_fck;
-
-static const char *emac_fck_parent_names[] = {
-	"rmii_ck",
-};
-
-static struct clk_hw_omap emac_fck_hw = {
-	.hw = {
-		.clk = &emac_fck,
-	},
-	.enable_reg	= OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-	.enable_bit	= AM35XX_CPGMAC_FCLK_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(emac_fck, emac_fck_parent_names, aes1_ick_ops);
-
-static struct clk ipss_ick;
-
-static const char *ipss_ick_parent_names[] = {
-	"core_l3_ick",
-};
-
-static struct clk_hw_omap ipss_ick_hw = {
-	.hw = {
-		.clk = &ipss_ick,
-	},
-	.ops		= &clkhwops_am35xx_ipss_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= AM35XX_EN_IPSS_SHIFT,
-	.clkdm_name	= "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ipss_ick, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk emac_ick;
-
-static const char *emac_ick_parent_names[] = {
-	"ipss_ick",
-};
-
-static struct clk_hw_omap emac_ick_hw = {
-	.hw = {
-		.clk = &emac_ick,
-	},
-	.ops		= &clkhwops_am35xx_ipss_module_wait,
-	.enable_reg	= OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-	.enable_bit	= AM35XX_CPGMAC_VBUSP_CLK_SHIFT,
-	.clkdm_name	= "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(emac_ick, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk emu_core_alwon_ck;
-
-static const char *emu_core_alwon_ck_parent_names[] = {
-	"dpll3_m3x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_core_alwon_ck, "dpll3_clkdm");
-DEFINE_STRUCT_CLK(emu_core_alwon_ck, emu_core_alwon_ck_parent_names,
-		  core_l4_ick_ops);
-
-static struct clk emu_mpu_alwon_ck;
-
-static const char *emu_mpu_alwon_ck_parent_names[] = {
-	"mpu_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_mpu_alwon_ck, NULL);
-DEFINE_STRUCT_CLK(emu_mpu_alwon_ck, emu_mpu_alwon_ck_parent_names, core_ck_ops);
-
-static struct clk emu_per_alwon_ck;
-
-static const char *emu_per_alwon_ck_parent_names[] = {
-	"dpll4_m6x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_per_alwon_ck, "dpll4_clkdm");
-DEFINE_STRUCT_CLK(emu_per_alwon_ck, emu_per_alwon_ck_parent_names,
-		  core_l4_ick_ops);
-
-static const char *emu_src_ck_parent_names[] = {
-	"sys_ck", "emu_core_alwon_ck", "emu_per_alwon_ck", "emu_mpu_alwon_ck",
-};
-
-static const struct clksel_rate emu_src_sys_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-	{ .div = 0 },
-};
-
-static const struct clksel_rate emu_src_core_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-	{ .div = 0 },
-};
-
-static const struct clksel_rate emu_src_per_rates[] = {
-	{ .div = 1, .val = 2, .flags = RATE_IN_3XXX },
-	{ .div = 0 },
-};
-
-static const struct clksel_rate emu_src_mpu_rates[] = {
-	{ .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-	{ .div = 0 },
-};
-
-static const struct clksel emu_src_clksel[] = {
-	{ .parent = &sys_ck,		.rates = emu_src_sys_rates },
-	{ .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates },
-	{ .parent = &emu_per_alwon_ck,	.rates = emu_src_per_rates },
-	{ .parent = &emu_mpu_alwon_ck,	.rates = emu_src_mpu_rates },
-	{ .parent = NULL },
-};
-
-static const struct clk_ops emu_src_ck_ops = {
-	.init		= &omap2_init_clk_clkdm,
-	.recalc_rate	= &omap2_clksel_recalc,
-	.get_parent	= &omap2_clksel_find_parent_index,
-	.set_parent	= &omap2_clksel_set_parent,
-	.enable		= &omap2_clkops_enable_clkdm,
-	.disable	= &omap2_clkops_disable_clkdm,
-};
-
-static struct clk emu_src_ck;
-
-static struct clk_hw_omap emu_src_ck_hw = {
-	.hw = {
-		.clk = &emu_src_ck,
-	},
-	.clksel		= emu_src_clksel,
-	.clksel_reg	= OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-	.clksel_mask	= OMAP3430_MUX_CTRL_MASK,
-	.clkdm_name	= "emu_clkdm",
-};
-
-DEFINE_STRUCT_CLK(emu_src_ck, emu_src_ck_parent_names, emu_src_ck_ops);
-
-DEFINE_CLK_DIVIDER(atclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-		   OMAP3430_CLKSEL_ATCLK_SHIFT, OMAP3430_CLKSEL_ATCLK_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk fac_ick;
-
-static struct clk_hw_omap fac_ick_hw = {
-	.hw = {
-		.clk = &fac_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430ES1_EN_FAC_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(fac_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk fshostusb_fck;
-
-static const char *fshostusb_fck_parent_names[] = {
-	"core_48m_fck",
-};
-
-static struct clk_hw_omap fshostusb_fck_hw = {
-	.hw = {
-		.clk = &fshostusb_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(fshostusb_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_l3_ck;
-
-static struct clk_hw_omap gfx_l3_ck_hw = {
-	.hw = {
-		.clk = &gfx_l3_ck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP_EN_GFX_SHIFT,
-	.clkdm_name	= "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_l3_ck, core_l3_ick_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(gfx_l3_fck, "l3_ick", &l3_ick, 0x0,
-		   OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
-		   OMAP_CLKSEL_GFX_SHIFT, OMAP_CLKSEL_GFX_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk gfx_cg1_ck;
-
-static const char *gfx_cg1_ck_parent_names[] = {
-	"gfx_l3_fck",
-};
-
-static struct clk_hw_omap gfx_cg1_ck_hw = {
-	.hw = {
-		.clk = &gfx_cg1_ck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430ES1_EN_2D_SHIFT,
-	.clkdm_name	= "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_cg1_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_cg2_ck;
-
-static struct clk_hw_omap gfx_cg2_ck_hw = {
-	.hw = {
-		.clk = &gfx_cg2_ck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430ES1_EN_3D_SHIFT,
-	.clkdm_name	= "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_cg2_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_l3_ick;
-
-static const char *gfx_l3_ick_parent_names[] = {
-	"gfx_l3_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(gfx_l3_ick, "gfx_3430es1_clkdm");
-DEFINE_STRUCT_CLK(gfx_l3_ick, gfx_l3_ick_parent_names, core_l4_ick_ops);
-
-static struct clk wkup_32k_fck;
-
-static const char *wkup_32k_fck_parent_names[] = {
-	"omap_32k_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(wkup_32k_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wkup_32k_fck, wkup_32k_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpio1_dbck;
-
-static const char *gpio1_dbck_parent_names[] = {
-	"wkup_32k_fck",
-};
-
-static struct clk_hw_omap gpio1_dbck_hw = {
-	.hw = {
-		.clk = &gpio1_dbck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO1_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wkup_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wkup_l4_ick, cpefuse_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpio1_ick;
-
-static const char *gpio1_ick_parent_names[] = {
-	"wkup_l4_ick",
-};
-
-static struct clk_hw_omap gpio1_ick_hw = {
-	.hw = {
-		.clk = &gpio1_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO1_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk per_32k_alwon_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_32k_alwon_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_32k_alwon_fck, wkup_32k_fck_parent_names,
-		  core_l4_ick_ops);
-
-static struct clk gpio2_dbck;
-
-static const char *gpio2_dbck_parent_names[] = {
-	"per_32k_alwon_fck",
-};
-
-static struct clk_hw_omap gpio2_dbck_hw = {
-	.hw = {
-		.clk = &gpio2_dbck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO2_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio2_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk per_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_l4_ick, "per_clkdm");
-DEFINE_STRUCT_CLK(per_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk gpio2_ick;
-
-static const char *gpio2_ick_parent_names[] = {
-	"per_l4_ick",
-};
-
-static struct clk_hw_omap gpio2_ick_hw = {
-	.hw = {
-		.clk = &gpio2_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO2_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio3_dbck;
-
-static struct clk_hw_omap gpio3_dbck_hw = {
-	.hw = {
-		.clk = &gpio3_dbck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO3_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio3_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio3_ick;
-
-static struct clk_hw_omap gpio3_ick_hw = {
-	.hw = {
-		.clk = &gpio3_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO3_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio4_dbck;
-
-static struct clk_hw_omap gpio4_dbck_hw = {
-	.hw = {
-		.clk = &gpio4_dbck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO4_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio4_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio4_ick;
-
-static struct clk_hw_omap gpio4_ick_hw = {
-	.hw = {
-		.clk = &gpio4_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO4_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio5_dbck;
-
-static struct clk_hw_omap gpio5_dbck_hw = {
-	.hw = {
-		.clk = &gpio5_dbck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO5_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio5_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio5_ick;
-
-static struct clk_hw_omap gpio5_ick_hw = {
-	.hw = {
-		.clk = &gpio5_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO5_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio5_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio6_dbck;
-
-static struct clk_hw_omap gpio6_dbck_hw = {
-	.hw = {
-		.clk = &gpio6_dbck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO6_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio6_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio6_ick;
-
-static struct clk_hw_omap gpio6_ick_hw = {
-	.hw = {
-		.clk = &gpio6_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPIO6_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio6_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpmc_fck;
-
-static struct clk_hw_omap gpmc_fck_hw = {
-	.hw = {
-		.clk = &gpmc_fck,
-	},
-	.flags		= ENABLE_ON_INIT,
-	.clkdm_name	= "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpmc_fck, ipss_ick_parent_names, core_l4_ick_ops);
-
-static const struct clksel omap343x_gpt_clksel[] = {
-	{ .parent = &omap_32k_fck, .rates = gpt_32k_rates },
-	{ .parent = &sys_ck, .rates = gpt_sys_rates },
-	{ .parent = NULL },
-};
-
-static const char *gpt10_fck_parent_names[] = {
-	"omap_32k_fck", "sys_ck",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT10_MASK,
-			 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-			 OMAP3430_EN_GPT10_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt10_ick;
-
-static struct clk_hw_omap gpt10_ick_hw = {
-	.hw = {
-		.clk = &gpt10_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_GPT10_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt10_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT11_MASK,
-			 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-			 OMAP3430_EN_GPT11_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt11_ick;
-
-static struct clk_hw_omap gpt11_ick_hw = {
-	.hw = {
-		.clk = &gpt11_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_GPT11_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt11_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpt12_fck;
-
-static const char *gpt12_fck_parent_names[] = {
-	"secure_32k_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(gpt12_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(gpt12_fck, gpt12_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpt12_ick;
-
-static struct clk_hw_omap gpt12_ick_hw = {
-	.hw = {
-		.clk = &gpt12_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPT12_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt12_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "wkup_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT1_MASK,
-			 OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-			 OMAP3430_EN_GPT1_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt1_ick;
-
-static struct clk_hw_omap gpt1_ick_hw = {
-	.hw = {
-		.clk = &gpt1_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPT1_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "per_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT2_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_GPT2_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt2_ick;
-
-static struct clk_hw_omap gpt2_ick_hw = {
-	.hw = {
-		.clk = &gpt2_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPT2_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "per_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT3_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_GPT3_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt3_ick;
-
-static struct clk_hw_omap gpt3_ick_hw = {
-	.hw = {
-		.clk = &gpt3_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPT3_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "per_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT4_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_GPT4_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt4_ick;
-
-static struct clk_hw_omap gpt4_ick_hw = {
-	.hw = {
-		.clk = &gpt4_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPT4_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "per_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT5_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_GPT5_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt5_ick;
-
-static struct clk_hw_omap gpt5_ick_hw = {
-	.hw = {
-		.clk = &gpt5_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPT5_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt5_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "per_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT6_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_GPT6_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt6_ick;
-
-static struct clk_hw_omap gpt6_ick_hw = {
-	.hw = {
-		.clk = &gpt6_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPT6_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt6_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "per_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT7_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_GPT7_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt7_ick;
-
-static struct clk_hw_omap gpt7_ick_hw = {
-	.hw = {
-		.clk = &gpt7_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPT7_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt7_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "per_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT8_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_GPT8_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt8_ick;
-
-static struct clk_hw_omap gpt8_ick_hw = {
-	.hw = {
-		.clk = &gpt8_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPT8_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt8_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "per_clkdm", omap343x_gpt_clksel,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_GPT9_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_GPT9_SHIFT, &clkhwops_wait,
-			 gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt9_ick;
-
-static struct clk_hw_omap gpt9_ick_hw = {
-	.hw = {
-		.clk = &gpt9_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_GPT9_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt9_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk hdq_fck;
-
-static const char *hdq_fck_parent_names[] = {
-	"core_12m_fck",
-};
-
-static struct clk_hw_omap hdq_fck_hw = {
-	.hw = {
-		.clk = &hdq_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_HDQ_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hdq_fck, hdq_fck_parent_names, aes2_ick_ops);
-
-static struct clk hdq_ick;
-
-static struct clk_hw_omap hdq_ick_hw = {
-	.hw = {
-		.clk = &hdq_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_HDQ_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hdq_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk hecc_ck;
-
-static struct clk_hw_omap hecc_ck_hw = {
-	.hw = {
-		.clk = &hecc_ck,
-	},
-	.ops		= &clkhwops_am35xx_ipss_module_wait,
-	.enable_reg	= OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-	.enable_bit	= AM35XX_HECC_VBUSP_CLK_SHIFT,
-	.clkdm_name	= "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hecc_ck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_fck_am35xx;
-
-static struct clk_hw_omap hsotgusb_fck_am35xx_hw = {
-	.hw = {
-		.clk = &hsotgusb_fck_am35xx,
-	},
-	.enable_reg	= OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-	.enable_bit	= AM35XX_USBOTG_FCLK_SHIFT,
-	.clkdm_name	= "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_3430es1;
-
-static struct clk_hw_omap hsotgusb_ick_3430es1_hw = {
-	.hw = {
-		.clk = &hsotgusb_ick_3430es1,
-	},
-	.ops		= &clkhwops_iclk,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_HSOTGUSB_SHIFT,
-	.clkdm_name	= "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_3430es1, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_3430es2;
-
-static struct clk_hw_omap hsotgusb_ick_3430es2_hw = {
-	.hw = {
-		.clk = &hsotgusb_ick_3430es2,
-	},
-	.ops		= &clkhwops_omap3430es2_iclk_hsotgusb_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_HSOTGUSB_SHIFT,
-	.clkdm_name	= "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_3430es2, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_am35xx;
-
-static struct clk_hw_omap hsotgusb_ick_am35xx_hw = {
-	.hw = {
-		.clk = &hsotgusb_ick_am35xx,
-	},
-	.ops		= &clkhwops_am35xx_ipss_module_wait,
-	.enable_reg	= OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-	.enable_bit	= AM35XX_USBOTG_VBUSP_CLK_SHIFT,
-	.clkdm_name	= "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_am35xx, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c1_fck;
-
-static struct clk_hw_omap i2c1_fck_hw = {
-	.hw = {
-		.clk = &i2c1_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_I2C1_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c1_ick;
-
-static struct clk_hw_omap i2c1_ick_hw = {
-	.hw = {
-		.clk = &i2c1_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_I2C1_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c2_fck;
-
-static struct clk_hw_omap i2c2_fck_hw = {
-	.hw = {
-		.clk = &i2c2_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_I2C2_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c2_ick;
-
-static struct clk_hw_omap i2c2_ick_hw = {
-	.hw = {
-		.clk = &i2c2_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_I2C2_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c3_fck;
-
-static struct clk_hw_omap i2c3_fck_hw = {
-	.hw = {
-		.clk = &i2c3_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_I2C3_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c3_ick;
-
-static struct clk_hw_omap i2c3_ick_hw = {
-	.hw = {
-		.clk = &i2c3_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_I2C3_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk icr_ick;
-
-static struct clk_hw_omap icr_ick_hw = {
-	.hw = {
-		.clk = &icr_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_ICR_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(icr_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk iva2_ck;
-
-static const char *iva2_ck_parent_names[] = {
-	"dpll2_m2_ck",
-};
-
-static struct clk_hw_omap iva2_ck_hw = {
-	.hw = {
-		.clk = &iva2_ck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
-	.clkdm_name	= "iva2_clkdm",
-};
-
-DEFINE_STRUCT_CLK(iva2_ck, iva2_ck_parent_names, aes2_ick_ops);
-
-static struct clk mad2d_ick;
-
-static struct clk_hw_omap mad2d_ick_hw = {
-	.hw = {
-		.clk = &mad2d_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
-	.enable_bit	= OMAP3430_EN_MAD2D_SHIFT,
-	.clkdm_name	= "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk mailboxes_ick;
-
-static struct clk_hw_omap mailboxes_ick_hw = {
-	.hw = {
-		.clk = &mailboxes_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_MAILBOXES_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mailboxes_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate common_mcbsp_96m_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel mcbsp_15_clksel[] = {
-	{ .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
-	{ .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
-	{ .parent = NULL },
-};
-
-static const char *mcbsp1_fck_parent_names[] = {
-	"core_96m_fck", "mcbsp_clks",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_15_clksel,
-			 OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
-			 OMAP2_MCBSP1_CLKS_MASK,
-			 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-			 OMAP3430_EN_MCBSP1_SHIFT, &clkhwops_wait,
-			 mcbsp1_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp1_ick;
-
-static struct clk_hw_omap mcbsp1_ick_hw = {
-	.hw = {
-		.clk = &mcbsp1_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_MCBSP1_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk per_96m_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_96m_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_96m_fck, cm_96m_fck_parent_names, core_l4_ick_ops);
-
-static const struct clksel mcbsp_234_clksel[] = {
-	{ .parent = &per_96m_fck, .rates = common_mcbsp_96m_rates },
-	{ .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
-	{ .parent = NULL },
-};
-
-static const char *mcbsp2_fck_parent_names[] = {
-	"per_96m_fck", "mcbsp_clks",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "per_clkdm", mcbsp_234_clksel,
-			 OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
-			 OMAP2_MCBSP2_CLKS_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_MCBSP2_SHIFT, &clkhwops_wait,
-			 mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp2_ick;
-
-static struct clk_hw_omap mcbsp2_ick_hw = {
-	.hw = {
-		.clk = &mcbsp2_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_MCBSP2_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "per_clkdm", mcbsp_234_clksel,
-			 OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-			 OMAP2_MCBSP3_CLKS_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_MCBSP3_SHIFT, &clkhwops_wait,
-			 mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp3_ick;
-
-static struct clk_hw_omap mcbsp3_ick_hw = {
-	.hw = {
-		.clk = &mcbsp3_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_MCBSP3_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "per_clkdm", mcbsp_234_clksel,
-			 OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-			 OMAP2_MCBSP4_CLKS_MASK,
-			 OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-			 OMAP3430_EN_MCBSP4_SHIFT, &clkhwops_wait,
-			 mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp4_ick;
-
-static struct clk_hw_omap mcbsp4_ick_hw = {
-	.hw = {
-		.clk = &mcbsp4_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_MCBSP4_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp5_fck, "core_l4_clkdm", mcbsp_15_clksel,
-			 OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-			 OMAP2_MCBSP5_CLKS_MASK,
-			 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-			 OMAP3430_EN_MCBSP5_SHIFT, &clkhwops_wait,
-			 mcbsp1_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp5_ick;
-
-static struct clk_hw_omap mcbsp5_ick_hw = {
-	.hw = {
-		.clk = &mcbsp5_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_MCBSP5_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp5_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi1_fck;
-
-static struct clk_hw_omap mcspi1_fck_hw = {
-	.hw = {
-		.clk = &mcspi1_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_MCSPI1_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi1_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi1_ick;
-
-static struct clk_hw_omap mcspi1_ick_hw = {
-	.hw = {
-		.clk = &mcspi1_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_MCSPI1_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi2_fck;
-
-static struct clk_hw_omap mcspi2_fck_hw = {
-	.hw = {
-		.clk = &mcspi2_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_MCSPI2_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi2_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi2_ick;
-
-static struct clk_hw_omap mcspi2_ick_hw = {
-	.hw = {
-		.clk = &mcspi2_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_MCSPI2_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi3_fck;
-
-static struct clk_hw_omap mcspi3_fck_hw = {
-	.hw = {
-		.clk = &mcspi3_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_MCSPI3_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi3_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi3_ick;
-
-static struct clk_hw_omap mcspi3_ick_hw = {
-	.hw = {
-		.clk = &mcspi3_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_MCSPI3_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi4_fck;
-
-static struct clk_hw_omap mcspi4_fck_hw = {
-	.hw = {
-		.clk = &mcspi4_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_MCSPI4_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi4_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi4_ick;
-
-static struct clk_hw_omap mcspi4_ick_hw = {
-	.hw = {
-		.clk = &mcspi4_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_MCSPI4_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi4_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs1_fck;
-
-static struct clk_hw_omap mmchs1_fck_hw = {
-	.hw = {
-		.clk = &mmchs1_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_MMC1_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs1_ick;
-
-static struct clk_hw_omap mmchs1_ick_hw = {
-	.hw = {
-		.clk = &mmchs1_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_MMC1_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs2_fck;
-
-static struct clk_hw_omap mmchs2_fck_hw = {
-	.hw = {
-		.clk = &mmchs2_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_MMC2_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs2_ick;
-
-static struct clk_hw_omap mmchs2_ick_hw = {
-	.hw = {
-		.clk = &mmchs2_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_MMC2_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs3_fck;
-
-static struct clk_hw_omap mmchs3_fck_hw = {
-	.hw = {
-		.clk = &mmchs3_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430ES2_EN_MMC3_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs3_ick;
-
-static struct clk_hw_omap mmchs3_ick_hw = {
-	.hw = {
-		.clk = &mmchs3_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430ES2_EN_MMC3_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk modem_fck;
-
-static struct clk_hw_omap modem_fck_hw = {
-	.hw = {
-		.clk = &modem_fck,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_MODEM_SHIFT,
-	.clkdm_name	= "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(modem_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk mspro_fck;
-
-static struct clk_hw_omap mspro_fck_hw = {
-	.hw = {
-		.clk = &mspro_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_MSPRO_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mspro_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mspro_ick;
-
-static struct clk_hw_omap mspro_ick_hw = {
-	.hw = {
-		.clk = &mspro_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_MSPRO_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mspro_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk omap_192m_alwon_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(omap_192m_alwon_fck, NULL);
-DEFINE_STRUCT_CLK(omap_192m_alwon_fck, omap_96m_alwon_fck_parent_names,
-		  core_ck_ops);
-
-static struct clk omap_32ksync_ick;
-
-static struct clk_hw_omap omap_32ksync_ick_hw = {
-	.hw = {
-		.clk = &omap_32ksync_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_32KSYNC_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(omap_32ksync_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate omap_96m_alwon_fck_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_36XX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_36XX },
-	{ .div = 0 }
-};
-
-static const struct clksel omap_96m_alwon_fck_clksel[] = {
-	{ .parent = &omap_192m_alwon_fck, .rates = omap_96m_alwon_fck_rates },
-	{ .parent = NULL }
-};
-
-static struct clk omap_96m_alwon_fck_3630;
-
-static const char *omap_96m_alwon_fck_3630_parent_names[] = {
-	"omap_192m_alwon_fck",
-};
-
-static const struct clk_ops omap_96m_alwon_fck_3630_ops = {
-	.set_rate	= &omap2_clksel_set_rate,
-	.recalc_rate	= &omap2_clksel_recalc,
-	.round_rate	= &omap2_clksel_round_rate,
-};
-
-static struct clk_hw_omap omap_96m_alwon_fck_3630_hw = {
-	.hw = {
-		.clk = &omap_96m_alwon_fck_3630,
-	},
-	.clksel		= omap_96m_alwon_fck_clksel,
-	.clksel_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-	.clksel_mask	= OMAP3630_CLKSEL_96M_MASK,
-};
-
-static struct clk omap_96m_alwon_fck_3630 = {
-	.name	= "omap_96m_alwon_fck",
-	.hw	= &omap_96m_alwon_fck_3630_hw.hw,
-	.parent_names	= omap_96m_alwon_fck_3630_parent_names,
-	.num_parents	= ARRAY_SIZE(omap_96m_alwon_fck_3630_parent_names),
-	.ops	= &omap_96m_alwon_fck_3630_ops,
-};
-
-static struct clk omapctrl_ick;
-
-static struct clk_hw_omap omapctrl_ick_hw = {
-	.hw = {
-		.clk = &omapctrl_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_OMAPCTRL_SHIFT,
-	.flags		= ENABLE_ON_INIT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(omapctrl_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_DIVIDER(pclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-		   OMAP3430_CLKSEL_PCLK_SHIFT, OMAP3430_CLKSEL_PCLK_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(pclkx2_fck, "emu_src_ck", &emu_src_ck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-		   OMAP3430_CLKSEL_PCLKX2_SHIFT, OMAP3430_CLKSEL_PCLKX2_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk per_48m_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_48m_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk security_l3_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(security_l3_ick, NULL);
-DEFINE_STRUCT_CLK(security_l3_ick, core_l3_ick_parent_names, core_ck_ops);
-
-static struct clk pka_ick;
-
-static const char *pka_ick_parent_names[] = {
-	"security_l3_ick",
-};
-
-static struct clk_hw_omap pka_ick_hw = {
-	.hw = {
-		.clk = &pka_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-	.enable_bit	= OMAP3430_EN_PKA_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(pka_ick, pka_ick_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(rm_ick, "l4_ick", &l4_ick, 0x0,
-		   OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-		   OMAP3430_CLKSEL_RM_SHIFT, OMAP3430_CLKSEL_RM_WIDTH,
-		   CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk rng_ick;
-
-static struct clk_hw_omap rng_ick_hw = {
-	.hw = {
-		.clk = &rng_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-	.enable_bit	= OMAP3430_EN_RNG_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(rng_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk sad2d_ick;
-
-static struct clk_hw_omap sad2d_ick_hw = {
-	.hw = {
-		.clk = &sad2d_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_SAD2D_SHIFT,
-	.clkdm_name	= "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk sdrc_ick;
-
-static struct clk_hw_omap sdrc_ick_hw = {
-	.hw = {
-		.clk = &sdrc_ick,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_SDRC_SHIFT,
-	.flags		= ENABLE_ON_INIT,
-	.clkdm_name	= "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sdrc_ick, ipss_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate sgx_core_rates[] = {
-	{ .div = 2, .val = 5, .flags = RATE_IN_36XX },
-	{ .div = 3, .val = 0, .flags = RATE_IN_3XXX },
-	{ .div = 4, .val = 1, .flags = RATE_IN_3XXX },
-	{ .div = 6, .val = 2, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel_rate sgx_96m_rates[] = {
-	{ .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel_rate sgx_192m_rates[] = {
-	{ .div = 1, .val = 4, .flags = RATE_IN_36XX },
-	{ .div = 0 }
-};
-
-static const struct clksel_rate sgx_corex2_rates[] = {
-	{ .div = 3, .val = 6, .flags = RATE_IN_36XX },
-	{ .div = 5, .val = 7, .flags = RATE_IN_36XX },
-	{ .div = 0 }
-};
-
-static const struct clksel sgx_clksel[] = {
-	{ .parent = &core_ck, .rates = sgx_core_rates },
-	{ .parent = &cm_96m_fck, .rates = sgx_96m_rates },
-	{ .parent = &omap_192m_alwon_fck, .rates = sgx_192m_rates },
-	{ .parent = &corex2_fck, .rates = sgx_corex2_rates },
-	{ .parent = NULL },
-};
-
-static const char *sgx_fck_parent_names[] = {
-	"core_ck", "cm_96m_fck", "omap_192m_alwon_fck", "corex2_fck",
-};
-
-static struct clk sgx_fck;
-
-static const struct clk_ops sgx_fck_ops = {
-	.init		= &omap2_init_clk_clkdm,
-	.enable		= &omap2_dflt_clk_enable,
-	.disable	= &omap2_dflt_clk_disable,
-	.is_enabled	= &omap2_dflt_clk_is_enabled,
-	.recalc_rate	= &omap2_clksel_recalc,
-	.set_rate	= &omap2_clksel_set_rate,
-	.round_rate	= &omap2_clksel_round_rate,
-	.get_parent	= &omap2_clksel_find_parent_index,
-	.set_parent	= &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(sgx_fck, "sgx_clkdm", sgx_clksel,
-			 OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL),
-			 OMAP3430ES2_CLKSEL_SGX_MASK,
-			 OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_FCLKEN),
-			 OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT,
-			 &clkhwops_wait, sgx_fck_parent_names, sgx_fck_ops);
-
-static struct clk sgx_ick;
-
-static struct clk_hw_omap sgx_ick_hw = {
-	.hw = {
-		.clk = &sgx_ick,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT,
-	.clkdm_name	= "sgx_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sgx_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk sha11_ick;
-
-static struct clk_hw_omap sha11_ick_hw = {
-	.hw = {
-		.clk = &sha11_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-	.enable_bit	= OMAP3430_EN_SHA11_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(sha11_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk sha12_ick;
-
-static struct clk_hw_omap sha12_ick_hw = {
-	.hw = {
-		.clk = &sha12_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_SHA12_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sha12_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk sr1_fck;
-
-static struct clk_hw_omap sr1_fck_hw = {
-	.hw = {
-		.clk = &sr1_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_SR1_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sr1_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk sr2_fck;
-
-static struct clk_hw_omap sr2_fck_hw = {
-	.hw = {
-		.clk = &sr2_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_SR2_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sr2_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk sr_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(sr_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(sr_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk ssi_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(ssi_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(ssi_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk ssi_ick_3430es1;
-
-static const char *ssi_ick_3430es1_parent_names[] = {
-	"ssi_l4_ick",
-};
-
-static struct clk_hw_omap ssi_ick_3430es1_hw = {
-	.hw = {
-		.clk = &ssi_ick_3430es1,
-	},
-	.ops		= &clkhwops_iclk,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_SSI_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ssi_ick_3430es1, ssi_ick_3430es1_parent_names, aes2_ick_ops);
-
-static struct clk ssi_ick_3430es2;
-
-static struct clk_hw_omap ssi_ick_3430es2_hw = {
-	.hw = {
-		.clk = &ssi_ick_3430es2,
-	},
-	.ops		= &clkhwops_omap3430es2_iclk_ssi_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_SSI_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ssi_ick_3430es2, ssi_ick_3430es1_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate ssi_ssr_corex2_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
-	{ .div = 3, .val = 3, .flags = RATE_IN_3XXX },
-	{ .div = 4, .val = 4, .flags = RATE_IN_3XXX },
-	{ .div = 6, .val = 6, .flags = RATE_IN_3XXX },
-	{ .div = 8, .val = 8, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel ssi_ssr_clksel[] = {
-	{ .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates },
-	{ .parent = NULL },
-};
-
-static const char *ssi_ssr_fck_3430es1_parent_names[] = {
-	"corex2_fck",
-};
-
-static const struct clk_ops ssi_ssr_fck_3430es1_ops = {
-	.init		= &omap2_init_clk_clkdm,
-	.enable		= &omap2_dflt_clk_enable,
-	.disable	= &omap2_dflt_clk_disable,
-	.is_enabled	= &omap2_dflt_clk_is_enabled,
-	.recalc_rate	= &omap2_clksel_recalc,
-	.set_rate	= &omap2_clksel_set_rate,
-	.round_rate	= &omap2_clksel_round_rate,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es1, "core_l4_clkdm",
-			 ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_SSI_MASK,
-			 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-			 OMAP3430_EN_SSI_SHIFT,
-			 NULL, ssi_ssr_fck_3430es1_parent_names,
-			 ssi_ssr_fck_3430es1_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es2, "core_l4_clkdm",
-			 ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-			 OMAP3430_CLKSEL_SSI_MASK,
-			 OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-			 OMAP3430_EN_SSI_SHIFT,
-			 NULL, ssi_ssr_fck_3430es1_parent_names,
-			 ssi_ssr_fck_3430es1_ops);
-
-DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es1, "ssi_ssr_fck_3430es1",
-			&ssi_ssr_fck_3430es1, 0x0, 1, 2);
-
-DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es2, "ssi_ssr_fck_3430es2",
-			&ssi_ssr_fck_3430es2, 0x0, 1, 2);
-
-static struct clk sys_clkout1;
-
-static const char *sys_clkout1_parent_names[] = {
-	"osc_sys_ck",
-};
-
-static struct clk_hw_omap sys_clkout1_hw = {
-	.hw = {
-		.clk = &sys_clkout1,
-	},
-	.enable_reg	= OMAP3430_PRM_CLKOUT_CTRL,
-	.enable_bit	= OMAP3430_CLKOUT_EN_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(sys_clkout1, sys_clkout1_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(sys_clkout2, "clkout2_src_ck", &clkout2_src_ck, 0x0,
-		   OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_DIV_SHIFT,
-		   OMAP3430_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
-
-DEFINE_CLK_MUX(traceclk_src_fck, emu_src_ck_parent_names, NULL, 0x0,
-	       OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-	       OMAP3430_TRACE_MUX_CTRL_SHIFT, OMAP3430_TRACE_MUX_CTRL_WIDTH,
-	       0x0, NULL);
-
-DEFINE_CLK_DIVIDER(traceclk_fck, "traceclk_src_fck", &traceclk_src_fck, 0x0,
-		   OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-		   OMAP3430_CLKSEL_TRACECLK_SHIFT,
-		   OMAP3430_CLKSEL_TRACECLK_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk ts_fck;
-
-static struct clk_hw_omap ts_fck_hw = {
-	.hw = {
-		.clk = &ts_fck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-	.enable_bit	= OMAP3430ES2_EN_TS_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ts_fck, wkup_32k_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart1_fck;
-
-static struct clk_hw_omap uart1_fck_hw = {
-	.hw = {
-		.clk = &uart1_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_UART1_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart1_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart1_ick;
-
-static struct clk_hw_omap uart1_ick_hw = {
-	.hw = {
-		.clk = &uart1_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_UART1_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart2_fck;
-
-static struct clk_hw_omap uart2_fck_hw = {
-	.hw = {
-		.clk = &uart2_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= OMAP3430_EN_UART2_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart2_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart2_ick;
-
-static struct clk_hw_omap uart2_ick_hw = {
-	.hw = {
-		.clk = &uart2_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= OMAP3430_EN_UART2_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart3_fck;
-
-static const char *uart3_fck_parent_names[] = {
-	"per_48m_fck",
-};
-
-static struct clk_hw_omap uart3_fck_hw = {
-	.hw = {
-		.clk = &uart3_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_UART3_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart3_fck, uart3_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart3_ick;
-
-static struct clk_hw_omap uart3_ick_hw = {
-	.hw = {
-		.clk = &uart3_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_UART3_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart4_fck;
-
-static struct clk_hw_omap uart4_fck_hw = {
-	.hw = {
-		.clk = &uart4_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3630_EN_UART4_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_fck, uart3_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart4_fck_am35xx;
-
-static struct clk_hw_omap uart4_fck_am35xx_hw = {
-	.hw = {
-		.clk = &uart4_fck_am35xx,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-	.enable_bit	= AM35XX_EN_UART4_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_fck_am35xx, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart4_ick;
-
-static struct clk_hw_omap uart4_ick_hw = {
-	.hw = {
-		.clk = &uart4_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3630_EN_UART4_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart4_ick_am35xx;
-
-static struct clk_hw_omap uart4_ick_am35xx_hw = {
-	.hw = {
-		.clk = &uart4_ick_am35xx,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit	= AM35XX_EN_UART4_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_ick_am35xx, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate div2_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel usb_l4_clksel[] = {
-	{ .parent = &l4_ick, .rates = div2_rates },
-	{ .parent = NULL },
-};
-
-static const char *usb_l4_ick_parent_names[] = {
-	"l4_ick",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_clksel,
-			 OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-			 OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
-			 OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-			 OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
-			 &clkhwops_iclk_wait, usb_l4_ick_parent_names,
-			 ssi_ssr_fck_3430es1_ops);
-
-static struct clk usbhost_120m_fck;
-
-static const char *usbhost_120m_fck_parent_names[] = {
-	"dpll5_m2_ck",
-};
-
-static struct clk_hw_omap usbhost_120m_fck_hw = {
-	.hw = {
-		.clk = &usbhost_120m_fck,
-	},
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430ES2_EN_USBHOST2_SHIFT,
-	.clkdm_name	= "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_120m_fck, usbhost_120m_fck_parent_names,
-		  aes2_ick_ops);
-
-static struct clk usbhost_48m_fck;
-
-static struct clk_hw_omap usbhost_48m_fck_hw = {
-	.hw = {
-		.clk = &usbhost_48m_fck,
-	},
-	.ops		= &clkhwops_omap3430es2_dss_usbhost_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430ES2_EN_USBHOST1_SHIFT,
-	.clkdm_name	= "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_48m_fck, core_48m_fck_parent_names, aes2_ick_ops);
-
-static struct clk usbhost_ick;
-
-static struct clk_hw_omap usbhost_ick_hw = {
-	.hw = {
-		.clk = &usbhost_ick,
-	},
-	.ops		= &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430ES2_EN_USBHOST_SHIFT,
-	.clkdm_name	= "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_ick, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk usbtll_fck;
-
-static struct clk_hw_omap usbtll_fck_hw = {
-	.hw = {
-		.clk = &usbtll_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-	.enable_bit	= OMAP3430ES2_EN_USBTLL_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbtll_fck, usbhost_120m_fck_parent_names, aes2_ick_ops);
-
-static struct clk usbtll_ick;
-
-static struct clk_hw_omap usbtll_ick_hw = {
-	.hw = {
-		.clk = &usbtll_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
-	.enable_bit	= OMAP3430ES2_EN_USBTLL_SHIFT,
-	.clkdm_name	= "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbtll_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate usim_96m_rates[] = {
-	{ .div = 2, .val = 3, .flags = RATE_IN_3XXX },
-	{ .div = 4, .val = 4, .flags = RATE_IN_3XXX },
-	{ .div = 8, .val = 5, .flags = RATE_IN_3XXX },
-	{ .div = 10, .val = 6, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel_rate usim_120m_rates[] = {
-	{ .div = 4, .val = 7, .flags = RATE_IN_3XXX },
-	{ .div = 8, .val = 8, .flags = RATE_IN_3XXX },
-	{ .div = 16, .val = 9, .flags = RATE_IN_3XXX },
-	{ .div = 20, .val = 10, .flags = RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-static const struct clksel usim_clksel[] = {
-	{ .parent = &omap_96m_fck, .rates = usim_96m_rates },
-	{ .parent = &dpll5_m2_ck, .rates = usim_120m_rates },
-	{ .parent = &sys_ck, .rates = div2_rates },
-	{ .parent = NULL },
-};
-
-static const char *usim_fck_parent_names[] = {
-	"omap_96m_fck", "dpll5_m2_ck", "sys_ck",
-};
-
-static struct clk usim_fck;
-
-static const struct clk_ops usim_fck_ops = {
-	.enable		= &omap2_dflt_clk_enable,
-	.disable	= &omap2_dflt_clk_disable,
-	.is_enabled	= &omap2_dflt_clk_is_enabled,
-	.recalc_rate	= &omap2_clksel_recalc,
-	.get_parent	= &omap2_clksel_find_parent_index,
-	.set_parent	= &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(usim_fck, NULL, usim_clksel,
-			 OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-			 OMAP3430ES2_CLKSEL_USIMOCP_MASK,
-			 OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-			 OMAP3430ES2_EN_USIMOCP_SHIFT, &clkhwops_wait,
-			 usim_fck_parent_names, usim_fck_ops);
-
-static struct clk usim_ick;
-
-static struct clk_hw_omap usim_ick_hw = {
-	.hw = {
-		.clk = &usim_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430ES2_EN_USIMOCP_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usim_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk vpfe_fck;
-
-static const char *vpfe_fck_parent_names[] = {
-	"pclk_ck",
-};
-
-static struct clk_hw_omap vpfe_fck_hw = {
-	.hw = {
-		.clk = &vpfe_fck,
-	},
-	.enable_reg	= OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-	.enable_bit	= AM35XX_VPFE_FCLK_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(vpfe_fck, vpfe_fck_parent_names, aes1_ick_ops);
-
-static struct clk vpfe_ick;
-
-static struct clk_hw_omap vpfe_ick_hw = {
-	.hw = {
-		.clk = &vpfe_ick,
-	},
-	.ops		= &clkhwops_am35xx_ipss_module_wait,
-	.enable_reg	= OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-	.enable_bit	= AM35XX_VPFE_VBUSP_CLK_SHIFT,
-	.clkdm_name	= "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(vpfe_ick, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt1_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(wdt1_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wdt1_fck, gpt12_fck_parent_names, core_l4_ick_ops);
-
-static struct clk wdt1_ick;
-
-static struct clk_hw_omap wdt1_ick_hw = {
-	.hw = {
-		.clk = &wdt1_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_WDT1_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt2_fck;
-
-static struct clk_hw_omap wdt2_fck_hw = {
-	.hw = {
-		.clk = &wdt2_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_WDT2_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt2_fck, gpio1_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wdt2_ick;
-
-static struct clk_hw_omap wdt2_ick_hw = {
-	.hw = {
-		.clk = &wdt2_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_WDT2_SHIFT,
-	.clkdm_name	= "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt2_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt3_fck;
-
-static struct clk_hw_omap wdt3_fck_hw = {
-	.hw = {
-		.clk = &wdt3_fck,
-	},
-	.ops		= &clkhwops_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-	.enable_bit	= OMAP3430_EN_WDT3_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt3_fck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wdt3_ick;
-
-static struct clk_hw_omap wdt3_ick_hw = {
-	.hw = {
-		.clk = &wdt3_ick,
-	},
-	.ops		= &clkhwops_iclk_wait,
-	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-	.enable_bit	= OMAP3430_EN_WDT3_SHIFT,
-	.clkdm_name	= "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-/*
- * clocks specific to omap3430es1
- */
-static struct omap_clk omap3430es1_clks[] = {
-	CLK(NULL,	"gfx_l3_ck",	&gfx_l3_ck),
-	CLK(NULL,	"gfx_l3_fck",	&gfx_l3_fck),
-	CLK(NULL,	"gfx_l3_ick",	&gfx_l3_ick),
-	CLK(NULL,	"gfx_cg1_ck",	&gfx_cg1_ck),
-	CLK(NULL,	"gfx_cg2_ck",	&gfx_cg2_ck),
-	CLK(NULL,	"d2d_26m_fck",	&d2d_26m_fck),
-	CLK(NULL,	"fshostusb_fck", &fshostusb_fck),
-	CLK(NULL,	"ssi_ssr_fck",	&ssi_ssr_fck_3430es1),
-	CLK(NULL,	"ssi_sst_fck",	&ssi_sst_fck_3430es1),
-	CLK("musb-omap2430",	"ick",	&hsotgusb_ick_3430es1),
-	CLK(NULL,	"hsotgusb_ick",	&hsotgusb_ick_3430es1),
-	CLK(NULL,	"fac_ick",	&fac_ick),
-	CLK(NULL,	"ssi_ick",	&ssi_ick_3430es1),
-	CLK(NULL,	"usb_l4_ick",	&usb_l4_ick),
-	CLK(NULL,	"dss1_alwon_fck",	&dss1_alwon_fck_3430es1),
-	CLK("omapdss_dss",	"ick",		&dss_ick_3430es1),
-	CLK(NULL,	"dss_ick",		&dss_ick_3430es1),
-};
-
-/*
- * clocks specific to am35xx
- */
-static struct omap_clk am35xx_clks[] = {
-	CLK(NULL,	"ipss_ick",	&ipss_ick),
-	CLK(NULL,	"rmii_ck",	&rmii_ck),
-	CLK(NULL,	"pclk_ck",	&pclk_ck),
-	CLK(NULL,	"emac_ick",	&emac_ick),
-	CLK(NULL,	"emac_fck",	&emac_fck),
-	CLK("davinci_emac.0",	NULL,	&emac_ick),
-	CLK("davinci_mdio.0",	NULL,	&emac_fck),
-	CLK("vpfe-capture",	"master",	&vpfe_ick),
-	CLK("vpfe-capture",	"slave",	&vpfe_fck),
-	CLK(NULL,	"hsotgusb_ick",		&hsotgusb_ick_am35xx),
-	CLK(NULL,	"hsotgusb_fck",		&hsotgusb_fck_am35xx),
-	CLK(NULL,	"hecc_ck",	&hecc_ck),
-	CLK(NULL,	"uart4_ick",	&uart4_ick_am35xx),
-	CLK(NULL,	"uart4_fck",	&uart4_fck_am35xx),
-};
-
-/*
- * clocks specific to omap36xx
- */
-static struct omap_clk omap36xx_clks[] = {
-	CLK(NULL,	"omap_192m_alwon_fck", &omap_192m_alwon_fck),
-	CLK(NULL,	"uart4_fck",	&uart4_fck),
-};
-
-/*
- * clocks common to omap36xx omap34xx
- */
-static struct omap_clk omap34xx_omap36xx_clks[] = {
-	CLK(NULL,	"aes1_ick",	&aes1_ick),
-	CLK("omap_rng",	"ick",		&rng_ick),
-	CLK("omap3-rom-rng",	"ick",	&rng_ick),
-	CLK(NULL,	"sha11_ick",	&sha11_ick),
-	CLK(NULL,	"des1_ick",	&des1_ick),
-	CLK(NULL,	"cam_mclk",	&cam_mclk),
-	CLK(NULL,	"cam_ick",	&cam_ick),
-	CLK(NULL,	"csi2_96m_fck",	&csi2_96m_fck),
-	CLK(NULL,	"security_l3_ick", &security_l3_ick),
-	CLK(NULL,	"pka_ick",	&pka_ick),
-	CLK(NULL,	"icr_ick",	&icr_ick),
-	CLK("omap-aes",	"ick",	&aes2_ick),
-	CLK("omap-sham",	"ick",	&sha12_ick),
-	CLK(NULL,	"des2_ick",	&des2_ick),
-	CLK(NULL,	"mspro_ick",	&mspro_ick),
-	CLK(NULL,	"mailboxes_ick", &mailboxes_ick),
-	CLK(NULL,	"ssi_l4_ick",	&ssi_l4_ick),
-	CLK(NULL,	"sr1_fck",	&sr1_fck),
-	CLK(NULL,	"sr2_fck",	&sr2_fck),
-	CLK(NULL,	"sr_l4_ick",	&sr_l4_ick),
-	CLK(NULL,	"security_l4_ick2", &security_l4_ick2),
-	CLK(NULL,	"wkup_l4_ick",	&wkup_l4_ick),
-	CLK(NULL,	"dpll2_fck",	&dpll2_fck),
-	CLK(NULL,	"iva2_ck",	&iva2_ck),
-	CLK(NULL,	"modem_fck",	&modem_fck),
-	CLK(NULL,	"sad2d_ick",	&sad2d_ick),
-	CLK(NULL,	"mad2d_ick",	&mad2d_ick),
-	CLK(NULL,	"mspro_fck",	&mspro_fck),
-	CLK(NULL,	"dpll2_ck",	&dpll2_ck),
-	CLK(NULL,	"dpll2_m2_ck",	&dpll2_m2_ck),
-};
-
-/*
- * clocks common to omap36xx and omap3430es2plus
- */
-static struct omap_clk omap36xx_omap3430es2plus_clks[] = {
-	CLK(NULL,	"ssi_ssr_fck",	&ssi_ssr_fck_3430es2),
-	CLK(NULL,	"ssi_sst_fck",	&ssi_sst_fck_3430es2),
-	CLK("musb-omap2430",	"ick",	&hsotgusb_ick_3430es2),
-	CLK(NULL,	"hsotgusb_ick",	&hsotgusb_ick_3430es2),
-	CLK(NULL,	"ssi_ick",	&ssi_ick_3430es2),
-	CLK(NULL,	"usim_fck",	&usim_fck),
-	CLK(NULL,	"usim_ick",	&usim_ick),
-};
-
-/*
- * clocks common to am35xx omap36xx and omap3430es2plus
- */
-static struct omap_clk omap36xx_am35xx_omap3430es2plus_clks[] = {
-	CLK(NULL,	"virt_16_8m_ck", &virt_16_8m_ck),
-	CLK(NULL,	"dpll5_ck",	&dpll5_ck),
-	CLK(NULL,	"dpll5_m2_ck",	&dpll5_m2_ck),
-	CLK(NULL,	"sgx_fck",	&sgx_fck),
-	CLK(NULL,	"sgx_ick",	&sgx_ick),
-	CLK(NULL,	"cpefuse_fck",	&cpefuse_fck),
-	CLK(NULL,	"ts_fck",	&ts_fck),
-	CLK(NULL,	"usbtll_fck",	&usbtll_fck),
-	CLK(NULL,	"usbtll_ick",	&usbtll_ick),
-	CLK("omap_hsmmc.2",	"ick",	&mmchs3_ick),
-	CLK(NULL,	"mmchs3_ick",	&mmchs3_ick),
-	CLK(NULL,	"mmchs3_fck",	&mmchs3_fck),
-	CLK(NULL,	"dss1_alwon_fck",	&dss1_alwon_fck_3430es2),
-	CLK("omapdss_dss",	"ick",		&dss_ick_3430es2),
-	CLK(NULL,	"dss_ick",		&dss_ick_3430es2),
-	CLK(NULL,	"usbhost_120m_fck", &usbhost_120m_fck),
-	CLK(NULL,	"usbhost_48m_fck", &usbhost_48m_fck),
-	CLK(NULL,	"usbhost_ick",	&usbhost_ick),
-};
-
-/*
- * common clocks
- */
-static struct omap_clk omap3xxx_clks[] = {
-	CLK(NULL,	"apb_pclk",	&dummy_apb_pclk),
-	CLK(NULL,	"omap_32k_fck",	&omap_32k_fck),
-	CLK(NULL,	"virt_12m_ck",	&virt_12m_ck),
-	CLK(NULL,	"virt_13m_ck",	&virt_13m_ck),
-	CLK(NULL,	"virt_19200000_ck", &virt_19200000_ck),
-	CLK(NULL,	"virt_26000000_ck", &virt_26000000_ck),
-	CLK(NULL,	"virt_38_4m_ck", &virt_38_4m_ck),
-	CLK(NULL,	"osc_sys_ck",	&osc_sys_ck),
-	CLK("twl",	"fck",		&osc_sys_ck),
-	CLK(NULL,	"sys_ck",	&sys_ck),
-	CLK(NULL,	"omap_96m_alwon_fck", &omap_96m_alwon_fck),
-	CLK("etb",	"emu_core_alwon_ck", &emu_core_alwon_ck),
-	CLK(NULL,	"sys_altclk",	&sys_altclk),
-	CLK(NULL,	"mcbsp_clks",	&mcbsp_clks),
-	CLK(NULL,	"sys_clkout1",	&sys_clkout1),
-	CLK(NULL,	"dpll1_ck",	&dpll1_ck),
-	CLK(NULL,	"dpll1_x2_ck",	&dpll1_x2_ck),
-	CLK(NULL,	"dpll1_x2m2_ck", &dpll1_x2m2_ck),
-	CLK(NULL,	"dpll3_ck",	&dpll3_ck),
-	CLK(NULL,	"core_ck",	&core_ck),
-	CLK(NULL,	"dpll3_x2_ck",	&dpll3_x2_ck),
-	CLK(NULL,	"dpll3_m2_ck",	&dpll3_m2_ck),
-	CLK(NULL,	"dpll3_m2x2_ck", &dpll3_m2x2_ck),
-	CLK(NULL,	"dpll3_m3_ck",	&dpll3_m3_ck),
-	CLK(NULL,	"dpll3_m3x2_ck", &dpll3_m3x2_ck),
-	CLK(NULL,	"dpll4_ck",	&dpll4_ck),
-	CLK(NULL,	"dpll4_x2_ck",	&dpll4_x2_ck),
-	CLK(NULL,	"omap_96m_fck",	&omap_96m_fck),
-	CLK(NULL,	"cm_96m_fck",	&cm_96m_fck),
-	CLK(NULL,	"omap_54m_fck",	&omap_54m_fck),
-	CLK(NULL,	"omap_48m_fck",	&omap_48m_fck),
-	CLK(NULL,	"omap_12m_fck",	&omap_12m_fck),
-	CLK(NULL,	"dpll4_m2_ck",	&dpll4_m2_ck),
-	CLK(NULL,	"dpll4_m2x2_ck", &dpll4_m2x2_ck),
-	CLK(NULL,	"dpll4_m3_ck",	&dpll4_m3_ck),
-	CLK(NULL,	"dpll4_m3x2_ck", &dpll4_m3x2_ck),
-	CLK(NULL,	"dpll4_m4_ck",	&dpll4_m4_ck),
-	CLK(NULL,	"dpll4_m4x2_ck", &dpll4_m4x2_ck),
-	CLK(NULL,	"dpll4_m5_ck",	&dpll4_m5_ck),
-	CLK(NULL,	"dpll4_m5x2_ck", &dpll4_m5x2_ck),
-	CLK(NULL,	"dpll4_m6_ck",	&dpll4_m6_ck),
-	CLK(NULL,	"dpll4_m6x2_ck", &dpll4_m6x2_ck),
-	CLK("etb",	"emu_per_alwon_ck", &emu_per_alwon_ck),
-	CLK(NULL,	"clkout2_src_ck", &clkout2_src_ck),
-	CLK(NULL,	"sys_clkout2",	&sys_clkout2),
-	CLK(NULL,	"corex2_fck",	&corex2_fck),
-	CLK(NULL,	"dpll1_fck",	&dpll1_fck),
-	CLK(NULL,	"mpu_ck",	&mpu_ck),
-	CLK(NULL,	"arm_fck",	&arm_fck),
-	CLK("etb",	"emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
-	CLK(NULL,	"l3_ick",	&l3_ick),
-	CLK(NULL,	"l4_ick",	&l4_ick),
-	CLK(NULL,	"rm_ick",	&rm_ick),
-	CLK(NULL,	"gpt10_fck",	&gpt10_fck),
-	CLK(NULL,	"gpt11_fck",	&gpt11_fck),
-	CLK(NULL,	"core_96m_fck",	&core_96m_fck),
-	CLK(NULL,	"mmchs2_fck",	&mmchs2_fck),
-	CLK(NULL,	"mmchs1_fck",	&mmchs1_fck),
-	CLK(NULL,	"i2c3_fck",	&i2c3_fck),
-	CLK(NULL,	"i2c2_fck",	&i2c2_fck),
-	CLK(NULL,	"i2c1_fck",	&i2c1_fck),
-	CLK(NULL,	"mcbsp5_fck",	&mcbsp5_fck),
-	CLK(NULL,	"mcbsp1_fck",	&mcbsp1_fck),
-	CLK(NULL,	"core_48m_fck",	&core_48m_fck),
-	CLK(NULL,	"mcspi4_fck",	&mcspi4_fck),
-	CLK(NULL,	"mcspi3_fck",	&mcspi3_fck),
-	CLK(NULL,	"mcspi2_fck",	&mcspi2_fck),
-	CLK(NULL,	"mcspi1_fck",	&mcspi1_fck),
-	CLK(NULL,	"uart2_fck",	&uart2_fck),
-	CLK(NULL,	"uart1_fck",	&uart1_fck),
-	CLK(NULL,	"core_12m_fck",	&core_12m_fck),
-	CLK("omap_hdq.0",	"fck",	&hdq_fck),
-	CLK(NULL,	"hdq_fck",	&hdq_fck),
-	CLK(NULL,	"core_l3_ick",	&core_l3_ick),
-	CLK(NULL,	"sdrc_ick",	&sdrc_ick),
-	CLK(NULL,	"gpmc_fck",	&gpmc_fck),
-	CLK(NULL,	"core_l4_ick",	&core_l4_ick),
-	CLK("omap_hsmmc.1",	"ick",	&mmchs2_ick),
-	CLK("omap_hsmmc.0",	"ick",	&mmchs1_ick),
-	CLK(NULL,	"mmchs2_ick",	&mmchs2_ick),
-	CLK(NULL,	"mmchs1_ick",	&mmchs1_ick),
-	CLK("omap_hdq.0", "ick",	&hdq_ick),
-	CLK(NULL,	"hdq_ick",	&hdq_ick),
-	CLK("omap2_mcspi.4", "ick",	&mcspi4_ick),
-	CLK("omap2_mcspi.3", "ick",	&mcspi3_ick),
-	CLK("omap2_mcspi.2", "ick",	&mcspi2_ick),
-	CLK("omap2_mcspi.1", "ick",	&mcspi1_ick),
-	CLK(NULL,	"mcspi4_ick",	&mcspi4_ick),
-	CLK(NULL,	"mcspi3_ick",	&mcspi3_ick),
-	CLK(NULL,	"mcspi2_ick",	&mcspi2_ick),
-	CLK(NULL,	"mcspi1_ick",	&mcspi1_ick),
-	CLK("omap_i2c.3", "ick",	&i2c3_ick),
-	CLK("omap_i2c.2", "ick",	&i2c2_ick),
-	CLK("omap_i2c.1", "ick",	&i2c1_ick),
-	CLK(NULL,	"i2c3_ick",	&i2c3_ick),
-	CLK(NULL,	"i2c2_ick",	&i2c2_ick),
-	CLK(NULL,	"i2c1_ick",	&i2c1_ick),
-	CLK(NULL,	"uart2_ick",	&uart2_ick),
-	CLK(NULL,	"uart1_ick",	&uart1_ick),
-	CLK(NULL,	"gpt11_ick",	&gpt11_ick),
-	CLK(NULL,	"gpt10_ick",	&gpt10_ick),
-	CLK("omap-mcbsp.5", "ick",	&mcbsp5_ick),
-	CLK("omap-mcbsp.1", "ick",	&mcbsp1_ick),
-	CLK(NULL,	"mcbsp5_ick",	&mcbsp5_ick),
-	CLK(NULL,	"mcbsp1_ick",	&mcbsp1_ick),
-	CLK(NULL,	"omapctrl_ick",	&omapctrl_ick),
-	CLK(NULL,	"dss_tv_fck",	&dss_tv_fck),
-	CLK(NULL,	"dss_96m_fck",	&dss_96m_fck),
-	CLK(NULL,	"dss2_alwon_fck",	&dss2_alwon_fck),
-	CLK(NULL,	"init_60m_fclk",	&dummy_ck),
-	CLK(NULL,	"gpt1_fck",	&gpt1_fck),
-	CLK(NULL,	"aes2_ick",	&aes2_ick),
-	CLK(NULL,	"wkup_32k_fck",	&wkup_32k_fck),
-	CLK(NULL,	"gpio1_dbck",	&gpio1_dbck),
-	CLK(NULL,	"sha12_ick",	&sha12_ick),
-	CLK(NULL,	"wdt2_fck",		&wdt2_fck),
-	CLK("omap_wdt",	"ick",		&wdt2_ick),
-	CLK(NULL,	"wdt2_ick",	&wdt2_ick),
-	CLK(NULL,	"wdt1_ick",	&wdt1_ick),
-	CLK(NULL,	"gpio1_ick",	&gpio1_ick),
-	CLK(NULL,	"omap_32ksync_ick", &omap_32ksync_ick),
-	CLK(NULL,	"gpt12_ick",	&gpt12_ick),
-	CLK(NULL,	"gpt1_ick",	&gpt1_ick),
-	CLK(NULL,	"per_96m_fck",	&per_96m_fck),
-	CLK(NULL,	"per_48m_fck",	&per_48m_fck),
-	CLK(NULL,	"uart3_fck",	&uart3_fck),
-	CLK(NULL,	"gpt2_fck",	&gpt2_fck),
-	CLK(NULL,	"gpt3_fck",	&gpt3_fck),
-	CLK(NULL,	"gpt4_fck",	&gpt4_fck),
-	CLK(NULL,	"gpt5_fck",	&gpt5_fck),
-	CLK(NULL,	"gpt6_fck",	&gpt6_fck),
-	CLK(NULL,	"gpt7_fck",	&gpt7_fck),
-	CLK(NULL,	"gpt8_fck",	&gpt8_fck),
-	CLK(NULL,	"gpt9_fck",	&gpt9_fck),
-	CLK(NULL,	"per_32k_alwon_fck", &per_32k_alwon_fck),
-	CLK(NULL,	"gpio6_dbck",	&gpio6_dbck),
-	CLK(NULL,	"gpio5_dbck",	&gpio5_dbck),
-	CLK(NULL,	"gpio4_dbck",	&gpio4_dbck),
-	CLK(NULL,	"gpio3_dbck",	&gpio3_dbck),
-	CLK(NULL,	"gpio2_dbck",	&gpio2_dbck),
-	CLK(NULL,	"wdt3_fck",	&wdt3_fck),
-	CLK(NULL,	"per_l4_ick",	&per_l4_ick),
-	CLK(NULL,	"gpio6_ick",	&gpio6_ick),
-	CLK(NULL,	"gpio5_ick",	&gpio5_ick),
-	CLK(NULL,	"gpio4_ick",	&gpio4_ick),
-	CLK(NULL,	"gpio3_ick",	&gpio3_ick),
-	CLK(NULL,	"gpio2_ick",	&gpio2_ick),
-	CLK(NULL,	"wdt3_ick",	&wdt3_ick),
-	CLK(NULL,	"uart3_ick",	&uart3_ick),
-	CLK(NULL,	"uart4_ick",	&uart4_ick),
-	CLK(NULL,	"gpt9_ick",	&gpt9_ick),
-	CLK(NULL,	"gpt8_ick",	&gpt8_ick),
-	CLK(NULL,	"gpt7_ick",	&gpt7_ick),
-	CLK(NULL,	"gpt6_ick",	&gpt6_ick),
-	CLK(NULL,	"gpt5_ick",	&gpt5_ick),
-	CLK(NULL,	"gpt4_ick",	&gpt4_ick),
-	CLK(NULL,	"gpt3_ick",	&gpt3_ick),
-	CLK(NULL,	"gpt2_ick",	&gpt2_ick),
-	CLK("omap-mcbsp.2", "ick",	&mcbsp2_ick),
-	CLK("omap-mcbsp.3", "ick",	&mcbsp3_ick),
-	CLK("omap-mcbsp.4", "ick",	&mcbsp4_ick),
-	CLK(NULL,	"mcbsp4_ick",	&mcbsp2_ick),
-	CLK(NULL,	"mcbsp3_ick",	&mcbsp3_ick),
-	CLK(NULL,	"mcbsp2_ick",	&mcbsp4_ick),
-	CLK(NULL,	"mcbsp2_fck",	&mcbsp2_fck),
-	CLK(NULL,	"mcbsp3_fck",	&mcbsp3_fck),
-	CLK(NULL,	"mcbsp4_fck",	&mcbsp4_fck),
-	CLK("etb",	"emu_src_ck",	&emu_src_ck),
-	CLK(NULL,	"emu_src_ck",	&emu_src_ck),
-	CLK(NULL,	"pclk_fck",	&pclk_fck),
-	CLK(NULL,	"pclkx2_fck",	&pclkx2_fck),
-	CLK(NULL,	"atclk_fck",	&atclk_fck),
-	CLK(NULL,	"traceclk_src_fck", &traceclk_src_fck),
-	CLK(NULL,	"traceclk_fck",	&traceclk_fck),
-	CLK(NULL,	"secure_32k_fck", &secure_32k_fck),
-	CLK(NULL,	"gpt12_fck",	&gpt12_fck),
-	CLK(NULL,	"wdt1_fck",	&wdt1_fck),
-	CLK(NULL,	"timer_32k_ck",	&omap_32k_fck),
-	CLK(NULL,	"timer_sys_ck",	&sys_ck),
-	CLK(NULL,	"cpufreq_ck",	&dpll1_ck),
-};
-
-static const char *enable_init_clks[] = {
-	"sdrc_ick",
-	"gpmc_fck",
-	"omapctrl_ick",
-};
-
-int __init omap3xxx_clk_init(void)
-{
-	if (omap3_has_192mhz_clk())
-		omap_96m_alwon_fck = omap_96m_alwon_fck_3630;
-
-	if (cpu_is_omap3630()) {
-		dpll3_m3x2_ck = dpll3_m3x2_ck_3630;
-		dpll4_m2x2_ck = dpll4_m2x2_ck_3630;
-		dpll4_m3x2_ck = dpll4_m3x2_ck_3630;
-		dpll4_m4x2_ck = dpll4_m4x2_ck_3630;
-		dpll4_m5x2_ck = dpll4_m5x2_ck_3630;
-		dpll4_m6x2_ck = dpll4_m6x2_ck_3630;
-	}
-
-	/*
-	 * XXX This type of dynamic rewriting of the clock tree is
-	 * deprecated and should be revised soon.
-	 */
-	if (cpu_is_omap3630())
-		dpll4_dd = dpll4_dd_3630;
-	else
-		dpll4_dd = dpll4_dd_34xx;
-
-
-	/*
-	 * 3505 must be tested before 3517, since 3517 returns true
-	 * for both AM3517 chips and AM3517 family chips, which
-	 * includes 3505.  Unfortunately there's no obvious family
-	 * test for 3517/3505 :-(
-	 */
-	if (soc_is_am35xx()) {
-		cpu_mask = RATE_IN_34XX;
-		omap_clocks_register(am35xx_clks, ARRAY_SIZE(am35xx_clks));
-		omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-				     ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-		omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
-	} else if (cpu_is_omap3630()) {
-		cpu_mask = (RATE_IN_34XX | RATE_IN_36XX);
-		omap_clocks_register(omap36xx_clks, ARRAY_SIZE(omap36xx_clks));
-		omap_clocks_register(omap36xx_omap3430es2plus_clks,
-				     ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
-		omap_clocks_register(omap34xx_omap36xx_clks,
-				     ARRAY_SIZE(omap34xx_omap36xx_clks));
-		omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-				     ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-		omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
-	} else if (cpu_is_omap34xx()) {
-		if (omap_rev() == OMAP3430_REV_ES1_0) {
-			cpu_mask = RATE_IN_3430ES1;
-			omap_clocks_register(omap3430es1_clks,
-					     ARRAY_SIZE(omap3430es1_clks));
-			omap_clocks_register(omap34xx_omap36xx_clks,
-					     ARRAY_SIZE(omap34xx_omap36xx_clks));
-			omap_clocks_register(omap3xxx_clks,
-					     ARRAY_SIZE(omap3xxx_clks));
-		} else {
-			/*
-			 * Assume that anything that we haven't matched yet
-			 * has 3430ES2-type clocks.
-			 */
-			cpu_mask = RATE_IN_3430ES2PLUS;
-			omap_clocks_register(omap34xx_omap36xx_clks,
-					     ARRAY_SIZE(omap34xx_omap36xx_clks));
-			omap_clocks_register(omap36xx_omap3430es2plus_clks,
-					     ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
-			omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-					     ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-			omap_clocks_register(omap3xxx_clks,
-					     ARRAY_SIZE(omap3xxx_clks));
-		}
-	} else {
-		WARN(1, "clock: could not identify OMAP3 variant\n");
-	}
-
-		omap2_clk_disable_autoidle_all();
-
-	omap2_clk_enable_init_clocks(enable_init_clks,
-				     ARRAY_SIZE(enable_init_clks));
-
-	pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
-		(clk_get_rate(&osc_sys_ck) / 1000000),
-		(clk_get_rate(&osc_sys_ck) / 100000) % 10,
-		(clk_get_rate(&core_ck) / 1000000),
-		(clk_get_rate(&arm_fck) / 1000000));
-
-	/*
-	 * Lock DPLL5 -- here only until other device init code can
-	 * handle this
-	 */
-	if (omap_rev() >= OMAP3430_REV_ES2_0)
-		omap3_clk_lock_dpll5();
-
-	/* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
-	sdrc_ick_p = clk_get(NULL, "sdrc_ick");
-	arm_fck_p = clk_get(NULL, "arm_fck");
-
-	return 0;
-}
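
The pr_info() in the hunk above prints the crystal rate with one decimal place using only integer arithmetic: rate / 1000000 gives the whole MHz and (rate / 100000) % 10 the first fractional digit. A minimal user-space sketch of the same trick (the sample rates are just illustrative values):

/* build: cc -o mhz mhz.c && ./mhz */
#include <stdio.h>

/* Mirror the pr_info() arithmetic: one decimal digit, no floating point. */
static void print_mhz(unsigned long rate_hz)
{
	printf("%lu.%01lu MHz\n", rate_hz / 1000000, (rate_hz / 100000) % 10);
}

int main(void)
{
	print_mhz(26000000);	/* prints "26.0 MHz" */
	print_mhz(19200000);	/* prints "19.2 MHz" */
	return 0;
}
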
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 4ae4cce..6124db5 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -23,7 +23,6 @@
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/bitops.h>
-#include <linux/clk-private.h>
 #include <asm/cpu.h>
 
 #include <trace/events/power.h>
@@ -633,21 +632,6 @@
 };
 
 /**
- * omap_clocks_register - register an array of omap_clk
- * @ocs: pointer to an array of omap_clk to register
- */
-void __init omap_clocks_register(struct omap_clk oclks[], int cnt)
-{
-	struct omap_clk *c;
-
-	for (c = oclks; c < oclks + cnt; c++) {
-		clkdev_add(&c->lk);
-		if (!__clk_init(NULL, c->lk.clk))
-			omap2_init_clk_hw_omap_clocks(c->lk.clk);
-	}
-}
-
-/**
  * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
  * @mpurate_ck_name: clk name of the clock to change rate
  *
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 1cf9dd8..a56742f 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -40,23 +40,29 @@
 struct clockdomain;
 
 #define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name)	\
-	static struct clk _name = {				\
+	static struct clk_core _name##_core = {			\
 		.name = #_name,					\
 		.hw = &_name##_hw.hw,				\
 		.parent_names = _parent_array_name,		\
 		.num_parents = ARRAY_SIZE(_parent_array_name),	\
 		.ops = &_clkops_name,				\
+	};							\
+	static struct clk _name = {				\
+		.core = &_name##_core,				\
 	};
 
 #define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name,	\
 				_clkops_name, _flags)		\
-	static struct clk _name = {				\
+	static struct clk_core _name##_core = {			\
 		.name = #_name,					\
 		.hw = &_name##_hw.hw,				\
 		.parent_names = _parent_array_name,		\
 		.num_parents = ARRAY_SIZE(_parent_array_name),	\
 		.ops = &_clkops_name,				\
 		.flags = _flags,				\
+	};							\
+	static struct clk _name = {				\
+		.core = &_name##_core,				\
 	};
 
 #define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name)		\
@@ -238,7 +244,6 @@
 extern struct ti_clk_features ti_clk_features;
 
 extern const struct clkops clkops_omap2_dflt_wait;
-extern const struct clkops clkops_dummy;
 extern const struct clkops clkops_omap2_dflt;
 
 extern struct clk_functions omap2_clk_functions;
@@ -247,7 +252,6 @@
 extern const struct clksel_rate gpt_sys_rates[];
 extern const struct clksel_rate gfx_l3_rates[];
 extern const struct clksel_rate dsp_ick_rates[];
-extern struct clk dummy_ck;
 
 extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
 extern const struct clk_hw_omap_ops clkhwops_wait;
@@ -272,7 +276,5 @@
 extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
 extern void omap2_clkops_disable_clkdm(struct clk_hw *hw);
 
-extern void omap_clocks_register(struct omap_clk *oclks, int cnt);
-
 void __init ti_clk_init_features(void);
 #endif
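
After the clk/clk_core split in the common clock framework, DEFINE_STRUCT_CLK emits the hardware description as a clk_core and wraps it in a thin struct clk handle. A sketch of what the revised macro expands to for a hypothetical clock named foo_ck (foo_ck_hw, foo_parent_names and foo_clkops are assumed to be defined elsewhere):

/* DEFINE_STRUCT_CLK(foo_ck, foo_parent_names, foo_clkops) expands to: */
static struct clk_core foo_ck_core = {
	.name		= "foo_ck",
	.hw		= &foo_ck_hw.hw,
	.parent_names	= foo_parent_names,
	.num_parents	= ARRAY_SIZE(foo_parent_names),
	.ops		= &foo_clkops,
};
static struct clk foo_ck = {
	.core		= &foo_ck_core,
};
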
diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c
index ef4d21b..61b60df 100644
--- a/arch/arm/mach-omap2/clock_common_data.c
+++ b/arch/arm/mach-omap2/clock_common_data.c
@@ -16,7 +16,6 @@
  * OMAP3xxx clock definition files.
  */
 
-#include <linux/clk-private.h>
 #include "clock.h"
 
 /* clksel_rate data common to 24xx/343x */
@@ -114,13 +113,3 @@
 	{ .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
 	{ .div = 0 },
 };
-
-/* Clocks shared between various OMAP SoCs */
-
-static struct clk_ops dummy_ck_ops = {};
-
-struct clk dummy_ck = {
-	.name = "dummy_clk",
-	.ops = &dummy_ck_ops,
-	.flags = CLK_IS_BASIC,
-};
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index c2da2a0..44e57ec 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -410,7 +410,7 @@
 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
 	int r;
 	struct dpll_data *dd;
-	struct clk *parent;
+	struct clk_hw *parent;
 
 	dd = clk->dpll_data;
 	if (!dd)
@@ -427,13 +427,13 @@
 		}
 	}
 
-	parent = __clk_get_parent(hw->clk);
+	parent = __clk_get_hw(__clk_get_parent(hw->clk));
 
 	if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
-		WARN_ON(parent != dd->clk_bypass);
+		WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
 		r = _omap3_noncore_dpll_bypass(clk);
 	} else {
-		WARN_ON(parent != dd->clk_ref);
+		WARN_ON(parent != __clk_get_hw(dd->clk_ref));
 		r = _omap3_noncore_dpll_lock(clk);
 	}
 
@@ -473,6 +473,8 @@
  * in failure.
  */
 long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
+				       unsigned long min_rate,
+				       unsigned long max_rate,
 				       unsigned long *best_parent_rate,
 				       struct clk_hw **best_parent_clk)
 {
@@ -549,7 +551,8 @@
 	if (!dd)
 		return -EINVAL;
 
-	if (__clk_get_parent(hw->clk) != dd->clk_ref)
+	if (__clk_get_hw(__clk_get_parent(hw->clk)) !=
+	    __clk_get_hw(dd->clk_ref))
 		return -EINVAL;
 
 	if (dd->last_rounded_rate == 0)
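
With struct clk turned into a per-user handle (see the clock.h hunk earlier in this series), two clk pointers may refer to the same underlying clock, so the DPLL code above compares the clk_hw returned by __clk_get_hw() instead of the handles themselves. A minimal helper expressing that idea, assuming only <linux/clk-provider.h>:

#include <linux/clk.h>
#include <linux/clk-provider.h>

/* True when two clk handles name the same hardware clock. */
static inline bool clk_same_hw(struct clk *a, struct clk *b)
{
	return __clk_get_hw(a) == __clk_get_hw(b);
}
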
diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c
index fc71224..f231be0 100644
--- a/arch/arm/mach-omap2/dpll44xx.c
+++ b/arch/arm/mach-omap2/dpll44xx.c
@@ -202,6 +202,8 @@
  * in failure.
  */
 long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long min_rate,
+					unsigned long max_rate,
 					unsigned long *best_parent_rate,
 					struct clk_hw **best_parent_clk)
 {
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index e60780f..c4871c5 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -461,7 +461,17 @@
 	omap3xxx_clockdomains_init();
 	omap3xxx_hwmod_init();
 	omap_hwmod_init_postsetup();
-	omap_clk_soc_init = omap3xxx_clk_init;
+	if (!of_have_populated_dt()) {
+		omap3_prcm_legacy_iomaps_init();
+		if (soc_is_am35xx())
+			omap_clk_soc_init = am35xx_clk_legacy_init;
+		else if (cpu_is_omap3630())
+			omap_clk_soc_init = omap36xx_clk_legacy_init;
+		else if (omap_rev() == OMAP3430_REV_ES1_0)
+			omap_clk_soc_init = omap3430es1_clk_legacy_init;
+		else
+			omap_clk_soc_init = omap3430_clk_legacy_init;
+	}
 }
 
 void __init omap3430_init_early(void)
@@ -753,15 +763,17 @@
 
 	ti_clk_init_features();
 
-	ret = of_prcm_init();
-	if (ret)
-		return ret;
+	if (of_have_populated_dt()) {
+		ret = of_prcm_init();
+		if (ret)
+			return ret;
 
-	of_clk_init(NULL);
+		of_clk_init(NULL);
 
-	ti_dt_clk_init_retry_clks();
+		ti_dt_clk_init_retry_clks();
 
-	ti_dt_clockdomains_setup();
+		ti_dt_clockdomains_setup();
+	}
 
 	ret = omap_clk_soc_init();
 
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 2418bdf..cee0fe1 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -242,7 +242,7 @@
 }
 omap_early_initcall(omap4_sar_ram_init);
 
-static struct of_device_id gic_match[] = {
+static const struct of_device_id gic_match[] = {
 	{ .compatible = "arm,cortex-a9-gic", },
 	{ .compatible = "arm,cortex-a15-gic", },
 	{ },
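
The gic_match table is only ever read by the OF matching code, so it can be const and live in read-only data; the same applies to the other match tables constified further down. A minimal sketch of the pattern with a hypothetical compatible string:

#include <linux/of.h>

/* Sentinel-terminated, read-only match table (hypothetical binding). */
static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo-intc", },
	{ /* sentinel */ },
};

static bool foo_node_matches(struct device_node *np)
{
	return of_match_node(foo_match, np) != NULL;
}
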
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 92afb72..355b089 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1692,16 +1692,15 @@
 	if (ret == -EBUSY)
 		pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name);
 
-	if (!ret) {
+	if (oh->clkdm) {
 		/*
 		 * Set the clockdomain to HW_AUTO, assuming that the
 		 * previous state was HW_AUTO.
 		 */
-		if (oh->clkdm && hwsup)
+		if (hwsup)
 			clkdm_allow_idle(oh->clkdm);
-	} else {
-		if (oh->clkdm)
-			clkdm_hwmod_disable(oh->clkdm, oh);
+
+		clkdm_hwmod_disable(oh->clkdm, oh);
 	}
 
 	return ret;
@@ -2698,6 +2697,7 @@
 	INIT_LIST_HEAD(&oh->master_ports);
 	INIT_LIST_HEAD(&oh->slave_ports);
 	spin_lock_init(&oh->_lock);
+	lockdep_set_class(&oh->_lock, &oh->hwmod_key);
 
 	oh->_state = _HWMOD_STATE_REGISTERED;
 
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 9d4bec6e..9611c91 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -674,6 +674,7 @@
 	u32				_sysc_cache;
 	void __iomem			*_mpu_rt_va;
 	spinlock_t			_lock;
+	struct lock_class_key		hwmod_key; /* unique lock class */
 	struct list_head		node;
 	struct omap_hwmod_ocp_if	*_mpu_port;
 	unsigned int			(*xlate_irq)(unsigned int);
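
Embedding a lock_class_key in each omap_hwmod and registering it with lockdep_set_class() gives every _lock its own lockdep class, so nesting the locks of two different hwmods is no longer flagged as recursive locking. The same pattern for a hypothetical, statically allocated object type (lockdep keys must live in static storage, which holds for the hwmod arrays):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct foo {
	spinlock_t		lock;
	struct lock_class_key	lock_key;	/* one lockdep class per object */
};

static void foo_init(struct foo *f)
{
	spin_lock_init(&f->lock);
	lockdep_set_class(&f->lock, &f->lock_key);
}
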
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index e8692e7..16fe7a1 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1466,55 +1466,18 @@
  *
  */
 
-static struct omap_hwmod_class dra7xx_pcie_hwmod_class = {
+static struct omap_hwmod_class dra7xx_pciess_hwmod_class = {
 	.name	= "pcie",
 };
 
 /* pcie1 */
-static struct omap_hwmod dra7xx_pcie1_hwmod = {
+static struct omap_hwmod dra7xx_pciess1_hwmod = {
 	.name		= "pcie1",
-	.class		= &dra7xx_pcie_hwmod_class,
+	.class		= &dra7xx_pciess_hwmod_class,
 	.clkdm_name	= "pcie_clkdm",
 	.main_clk	= "l4_root_clk_div",
 	.prcm = {
 		.omap4 = {
-			.clkctrl_offs	= DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/* pcie2 */
-static struct omap_hwmod dra7xx_pcie2_hwmod = {
-	.name		= "pcie2",
-	.class		= &dra7xx_pcie_hwmod_class,
-	.clkdm_name	= "pcie_clkdm",
-	.main_clk	= "l4_root_clk_div",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET,
-			.modulemode   = MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-/*
- * 'PCIE PHY' class
- *
- */
-
-static struct omap_hwmod_class dra7xx_pcie_phy_hwmod_class = {
-	.name	= "pcie-phy",
-};
-
-/* pcie1 phy */
-static struct omap_hwmod dra7xx_pcie1_phy_hwmod = {
-	.name		= "pcie1-phy",
-	.class		= &dra7xx_pcie_phy_hwmod_class,
-	.clkdm_name	= "l3init_clkdm",
-	.main_clk	= "l4_root_clk_div",
-	.prcm = {
-		.omap4 = {
 			.clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET,
 			.context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET,
 			.modulemode   = MODULEMODE_SWCTRL,
@@ -1522,11 +1485,11 @@
 	},
 };
 
-/* pcie2 phy */
-static struct omap_hwmod dra7xx_pcie2_phy_hwmod = {
-	.name		= "pcie2-phy",
-	.class		= &dra7xx_pcie_phy_hwmod_class,
-	.clkdm_name	= "l3init_clkdm",
+/* pcie2 */
+static struct omap_hwmod dra7xx_pciess2_hwmod = {
+	.name		= "pcie2",
+	.class		= &dra7xx_pciess_hwmod_class,
+	.clkdm_name	= "pcie_clkdm",
 	.main_clk	= "l4_root_clk_div",
 	.prcm = {
 		.omap4 = {
@@ -2877,50 +2840,34 @@
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l3_main_1 -> pcie1 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie1 = {
+/* l3_main_1 -> pciess1 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = {
 	.master		= &dra7xx_l3_main_1_hwmod,
-	.slave		= &dra7xx_pcie1_hwmod,
+	.slave		= &dra7xx_pciess1_hwmod,
 	.clk		= "l3_iclk_div",
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> pcie1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1 = {
+/* l4_cfg -> pciess1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = {
 	.master		= &dra7xx_l4_cfg_hwmod,
-	.slave		= &dra7xx_pcie1_hwmod,
+	.slave		= &dra7xx_pciess1_hwmod,
 	.clk		= "l4_root_clk_div",
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l3_main_1 -> pcie2 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie2 = {
+/* l3_main_1 -> pciess2 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = {
 	.master		= &dra7xx_l3_main_1_hwmod,
-	.slave		= &dra7xx_pcie2_hwmod,
+	.slave		= &dra7xx_pciess2_hwmod,
 	.clk		= "l3_iclk_div",
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-/* l4_cfg -> pcie2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2 = {
+/* l4_cfg -> pciess2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = {
 	.master		= &dra7xx_l4_cfg_hwmod,
-	.slave		= &dra7xx_pcie2_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> pcie1 phy */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1_phy = {
-	.master		= &dra7xx_l4_cfg_hwmod,
-	.slave		= &dra7xx_pcie1_phy_hwmod,
-	.clk		= "l4_root_clk_div",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> pcie2 phy */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2_phy = {
-	.master		= &dra7xx_l4_cfg_hwmod,
-	.slave		= &dra7xx_pcie2_phy_hwmod,
+	.slave		= &dra7xx_pciess2_hwmod,
 	.clk		= "l4_root_clk_div",
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
@@ -3327,12 +3274,10 @@
 	&dra7xx_l4_cfg__mpu,
 	&dra7xx_l4_cfg__ocp2scp1,
 	&dra7xx_l4_cfg__ocp2scp3,
-	&dra7xx_l3_main_1__pcie1,
-	&dra7xx_l4_cfg__pcie1,
-	&dra7xx_l3_main_1__pcie2,
-	&dra7xx_l4_cfg__pcie2,
-	&dra7xx_l4_cfg__pcie1_phy,
-	&dra7xx_l4_cfg__pcie2_phy,
+	&dra7xx_l3_main_1__pciess1,
+	&dra7xx_l4_cfg__pciess1,
+	&dra7xx_l3_main_1__pciess2,
+	&dra7xx_l4_cfg__pciess2,
 	&dra7xx_l3_main_1__qspi,
 	&dra7xx_l4_per3__rtcss,
 	&dra7xx_l4_cfg__sata,
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 190fa43..e642b07 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -173,6 +173,7 @@
 
 static void __init omap3_evm_legacy_init(void)
 {
+	hsmmc2_internal_input_clk();
 	legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149);
 }
 
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index 77752e4..b9061a6 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -20,6 +20,7 @@
 extern u16 prm_features;
 extern void omap2_set_globals_prm(void __iomem *prm);
 int of_prcm_init(void);
+void omap3_prcm_legacy_iomaps_init(void);
 # endif
 
 /*
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index c5e00c6..5713bbd 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -674,7 +674,7 @@
 	return prm_register(&omap3xxx_prm_ll_data);
 }
 
-static struct of_device_id omap3_prm_dt_match_table[] = {
+static const struct of_device_id omap3_prm_dt_match_table[] = {
 	{ .compatible = "ti,omap3-prm" },
 	{ }
 };
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 408c64e..d6d6bc3 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -252,10 +252,10 @@
 {
 	saved_mask[0] =
 		omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
-					OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
+					OMAP4_PRM_IRQENABLE_MPU_OFFSET);
 	saved_mask[1] =
 		omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
-					OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET);
+					OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
 
 	omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST,
 				 OMAP4_PRM_IRQENABLE_MPU_OFFSET);
@@ -712,7 +712,7 @@
 	return prm_register(&omap44xx_prm_ll_data);
 }
 
-static struct of_device_id omap_prm_dt_match_table[] = {
+static const struct of_device_id omap_prm_dt_match_table[] = {
 	{ .compatible = "ti,omap4-prm" },
 	{ .compatible = "ti,omap5-prm" },
 	{ .compatible = "ti,dra7-prm" },
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 264b5e2..bfaa7ba 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -35,6 +35,8 @@
 #include "prm44xx.h"
 #include "common.h"
 #include "clock.h"
+#include "cm.h"
+#include "control.h"
 
 /*
  * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
@@ -641,6 +643,15 @@
 	return 0;
 }
 
+void __init omap3_prcm_legacy_iomaps_init(void)
+{
+	ti_clk_ll_ops = &omap_clk_ll_ops;
+
+	clk_memmaps[TI_CLKM_CM] = cm_base + OMAP3430_IVA2_MOD;
+	clk_memmaps[TI_CLKM_PRM] = prm_base + OMAP3430_IVA2_MOD;
+	clk_memmaps[TI_CLKM_SCRM] = omap_ctrl_base_get();
+}
+
 static int __init prm_late_init(void)
 {
 	if (prm_ll_data->late_init)
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index a219dc3..e03d8b5 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -27,7 +27,6 @@
 	select CPU_V7
 	select HAVE_ARM_SCU if SMP
 	select HAVE_SMP
-	select SMP_ON_UP if SMP
 	help
           Support for CSR SiRFSoC ARM Cortex A7 Platform
 
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
index 0c819bb..8cadb30 100644
--- a/arch/arm/mach-prima2/common.c
+++ b/arch/arm/mach-prima2/common.c
@@ -21,7 +21,7 @@
 }
 
 #ifdef CONFIG_ARCH_ATLAS6
-static const char *atlas6_dt_match[] __initconst = {
+static const char *const atlas6_dt_match[] __initconst = {
 	"sirf,atlas6",
 	NULL
 };
@@ -36,7 +36,7 @@
 #endif
 
 #ifdef CONFIG_ARCH_PRIMA2
-static const char *prima2_dt_match[] __initconst = {
+static const char *const prima2_dt_match[] __initconst = {
 	"sirf,prima2",
 	NULL
 };
@@ -52,7 +52,7 @@
 #endif
 
 #ifdef CONFIG_ARCH_ATLAS7
-static const char *atlas7_dt_match[] __initdata = {
+static const char *const atlas7_dt_match[] __initconst = {
 	"sirf,atlas7",
 	NULL
 };
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
index fc2b03c..e46c910 100644
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -40,7 +40,7 @@
 	spin_unlock(&boot_lock);
 }
 
-static struct of_device_id clk_ids[]  = {
+static const struct of_device_id clk_ids[]  = {
 	{ .compatible = "sirf,atlas7-clkc" },
 	{},
 };
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index 343c4e3..f6d02e4 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -36,6 +36,7 @@
 #include <linux/platform_data/video-pxafb.h>
 #include <mach/bitfield.h>
 #include <linux/platform_data/mmc-pxamci.h>
+#include <linux/smc91x.h>
 
 #include "generic.h"
 #include "devices.h"
@@ -81,11 +82,16 @@
 	}
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
 	.name		= "smc91x",
 	.id		= 0,
 	.num_resources	= ARRAY_SIZE(smc91x_resources),
 	.resource	= smc91x_resources,
+	.dev.platform_data = &smc91x_platdata,
 };
 
 static void idp_backlight_power(int on)
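
On the driver side, the flags set up here come back through dev_get_platdata(). A sketch of the consuming end, assuming only the smc91x_platdata layout used in these board files (the probe function itself is hypothetical):

#include <linux/platform_device.h>
#include <linux/smc91x.h>

static int foo_eth_probe(struct platform_device *pdev)
{
	struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);

	/* Boards may pass no platform data at all; check before use. */
	if (pd && (pd->flags & SMC91X_USE_32BIT))
		dev_info(&pdev->dev, "board requests 32-bit bus accesses\n");

	return 0;
}
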
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index ad777b3..eaee2c2 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -24,6 +24,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/pwm_backlight.h>
+#include <linux/smc91x.h>
 
 #include <asm/types.h>
 #include <asm/setup.h>
@@ -189,15 +190,20 @@
 	[1] = {
 		.start	= LPD270_ETHERNET_IRQ,
 		.end	= LPD270_ETHERNET_IRQ,
-		.flags	= IORESOURCE_IRQ,
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
 	},
 };
 
+struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
 	.name		= "smc91x",
 	.id		= 0,
 	.num_resources	= ARRAY_SIZE(smc91x_resources),
 	.resource	= smc91x_resources,
+	.dev.platform_data = &smc91x_platdata,
 };
 
 static struct resource lpd270_flash_resources[] = {
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 850e506..c309593 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -28,6 +28,7 @@
 #include <linux/platform_data/video-clcd-versatile.h>
 #include <linux/io.h>
 #include <linux/smsc911x.h>
+#include <linux/smc91x.h>
 #include <linux/ata_platform.h>
 #include <linux/amba/mmci.h>
 #include <linux/gfp.h>
@@ -94,6 +95,10 @@
 	.phy_interface	= PHY_INTERFACE_MODE_MII,
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device realview_eth_device = {
 	.name		= "smsc911x",
 	.id		= 0,
@@ -107,6 +112,8 @@
 	realview_eth_device.resource = res;
 	if (strcmp(realview_eth_device.name, "smsc911x") == 0)
 		realview_eth_device.dev.platform_data = &smsc911x_config;
+	else
+		realview_eth_device.dev.platform_data = &smc91x_platdata;
 
 	return platform_device_register(&realview_eth_device);
 }
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 64c88d6..b3869cb 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -234,7 +234,7 @@
 	[1] = {
 		.start		= IRQ_EB_ETH,
 		.end		= IRQ_EB_ETH,
-		.flags		= IORESOURCE_IRQ,
+		.flags		= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
 	},
 };
 
diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
index 5078932..ae4eb7c 100644
--- a/arch/arm/mach-rockchip/Kconfig
+++ b/arch/arm/mach-rockchip/Kconfig
@@ -11,6 +11,7 @@
 	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_TWD if SMP
 	select DW_APB_TIMER_OF
+	select REGULATOR if PM
 	select ROCKCHIP_TIMER
 	select ARM_GLOBAL_TIMER
 	select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h
index 7d752ff..7c889c0 100644
--- a/arch/arm/mach-rockchip/pm.h
+++ b/arch/arm/mach-rockchip/pm.h
@@ -24,7 +24,13 @@
 extern unsigned long rk3288_bootram_sz;
 
 void rockchip_slp_cpu_resume(void);
+#ifdef CONFIG_PM_SLEEP
 void __init rockchip_suspend_init(void);
+#else
+static inline void rockchip_suspend_init(void)
+{
+}
+#endif
 
 /****** following is rk3288 defined **********/
 #define RK3288_PMU_WAKEUP_CFG0		0x00
diff --git a/arch/arm/mach-s5pv210/s5pv210.c b/arch/arm/mach-s5pv210/s5pv210.c
index 43eb1eaea..83e656e 100644
--- a/arch/arm/mach-s5pv210/s5pv210.c
+++ b/arch/arm/mach-s5pv210/s5pv210.c
@@ -63,7 +63,7 @@
 	s5pv210_pm_init();
 }
 
-static char const *s5pv210_dt_compat[] __initconst = {
+static char const *const s5pv210_dt_compat[] __initconst = {
 	"samsung,s5pc110",
 	"samsung,s5pv210",
 	NULL
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 169262e..af868d25 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -12,6 +12,7 @@
 #include <linux/pm.h>
 #include <linux/serial_core.h>
 #include <linux/slab.h>
+#include <linux/smc91x.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -258,12 +259,17 @@
 			0x02000000, "smc91x-attrib"),
 		{ .flags = IORESOURCE_IRQ },
 	};
+	struct smc91x_platdata smc91x_platdata = {
+		.flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
+	};
 	struct platform_device_info smc91x_devinfo = {
 		.parent = &dev->dev,
 		.name = "smc91x",
 		.id = 0,
 		.res = smc91x_resources,
 		.num_res = ARRAY_SIZE(smc91x_resources),
+		.data = &smc91x_platdata,
+		.size_data = sizeof(smc91x_platdata),
 	};
 	int ret, irq;
 
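
Unlike the board files above, which point .dev.platform_data at static data, this platform data lives on the stack; that is safe because platform_device_register_full() copies .size_data bytes from .data into the new device instead of keeping the caller's pointer. A sketch of the same registration pattern for a hypothetical board helper:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/smc91x.h>

static int foo_register_smc91x(struct device *parent)
{
	struct smc91x_platdata pdata = {
		.flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
	};
	struct platform_device_info info = {
		.parent		= parent,
		.name		= "smc91x",
		.id		= 0,
		.data		= &pdata,		/* copied, not referenced */
		.size_data	= sizeof(pdata),
	};

	return PTR_ERR_OR_ZERO(platform_device_register_full(&info));
}
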
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 0912618..1525d7b 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -11,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/mtd/partitions.h>
+#include <linux/smc91x.h>
 
 #include <mach/hardware.h>
 #include <asm/setup.h>
@@ -43,12 +44,18 @@
 #endif
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
 
 static struct platform_device smc91x_device = {
 	.name		= "smc91x",
 	.id		= 0,
 	.num_resources	= ARRAY_SIZE(smc91x_resources),
 	.resource	= smc91x_resources,
+	.dev = {
+		.platform_data  = &smc91x_platdata,
+	},
 };
 
 static struct platform_device *devices[] __initdata = {
diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c
index aad97be..37f7b15 100644
--- a/arch/arm/mach-shmobile/setup-emev2.c
+++ b/arch/arm/mach-shmobile/setup-emev2.c
@@ -37,7 +37,7 @@
 	iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc));
 }
 
-static const char *emev2_boards_compat_dt[] __initconst = {
+static const char *const emev2_boards_compat_dt[] __initconst = {
 	"renesas,emev2",
 	NULL,
 };
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index 483cb46..a0f3b1c 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -45,6 +45,6 @@
 
 extern unsigned long socfpga_cpu1start_addr;
 
-#define SOCFPGA_SCU_VIRT_BASE   0xfffec000
+#define SOCFPGA_SCU_VIRT_BASE   0xfee00000
 
 #endif
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index 383d61e..f5e597c 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -23,6 +23,7 @@
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/cacheflush.h>
 
 #include "core.h"
 
@@ -73,6 +74,10 @@
 			(u32 *) &socfpga_cpu1start_addr))
 		pr_err("SMP: Need cpu1-start-addr in device tree.\n");
 
+	/* Ensure that socfpga_cpu1start_addr is visible to other CPUs */
+	smp_wmb();
+	sync_cache_w(&socfpga_cpu1start_addr);
+
 	sys_manager_base_addr = of_iomap(np, 0);
 
 	np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr");
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index 8825bc9..3b1ac46 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -13,6 +13,7 @@
 	select ARM_ERRATA_775420
 	select PL310_ERRATA_753970 if CACHE_L2X0
 	select PL310_ERRATA_769419 if CACHE_L2X0
+	select RESET_CONTROLLER
 	help
 	  Include support for STiH41x SOCs like STiH415/416 using the device tree
 	  for discovery
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c
index b067390..b373aca 100644
--- a/arch/arm/mach-sti/board-dt.c
+++ b/arch/arm/mach-sti/board-dt.c
@@ -18,6 +18,7 @@
 	"st,stih415",
 	"st,stih416",
 	"st,stih407",
+	"st,stih410",
 	"st,stih418",
 	NULL
 };
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index ef016af..914341b 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -91,8 +91,6 @@
 	struct soc_device *soc_dev;
 	struct device *parent = NULL;
 
-	tegra_clocks_apply_init_table();
-
 	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
 	if (!soc_dev_attr)
 		goto out;
diff --git a/arch/arm/mach-ux500/pm_domains.c b/arch/arm/mach-ux500/pm_domains.c
index 0d4b5b4..4d71c90 100644
--- a/arch/arm/mach-ux500/pm_domains.c
+++ b/arch/arm/mach-ux500/pm_domains.c
@@ -49,7 +49,7 @@
 	[DOMAIN_VAPE] = &ux500_pm_domain_vape,
 };
 
-static struct of_device_id ux500_pm_domain_matches[] = {
+static const struct of_device_id ux500_pm_domain_matches[] __initconst = {
 	{ .compatible = "stericsson,ux500-pm-domains", },
 	{ },
 };
diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c
index 9f9bc61..7de3e92 100644
--- a/arch/arm/mach-versatile/versatile_dt.c
+++ b/arch/arm/mach-versatile/versatile_dt.c
@@ -35,7 +35,7 @@
 			     versatile_auxdata_lookup, NULL);
 }
 
-static const char *versatile_dt_match[] __initconst = {
+static const char *const versatile_dt_match[] __initconst = {
 	"arm,versatile-ab",
 	"arm,versatile-pb",
 	NULL,
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index d6b16d9..3c2509b 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -73,6 +73,7 @@
 	depends on MCPM
 	select ARM_CCI
 	select ARCH_VEXPRESS_SPC
+	select ARM_CPU_SUSPEND
 	help
 	  Support for CPU and cluster power management on Versatile Express
 	  with a TC2 (A15x2 A7x3) big.LITTLE core tile.
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c43c714..9b4f29e 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -892,13 +892,6 @@
 
 if CACHE_L2X0
 
-config CACHE_PL310
-	bool
-	default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
-	help
-	  This option enables optimisations for the PL310 cache
-	  controller.
-
 config PL310_ERRATA_588369
 	bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
 	help
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c6c7696..8f15f70 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1131,23 +1131,22 @@
 	}
 
 	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
-	if (ret)
-		return;
-
-	switch (assoc) {
-	case 16:
-		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-		*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
-		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-		break;
-	case 8:
-		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
-		break;
-	default:
-		pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
-		       assoc);
-		break;
+	if (!ret) {
+		switch (assoc) {
+		case 16:
+			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+			*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
+			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+			break;
+		case 8:
+			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
+			break;
+		default:
+			pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
+			       assoc);
+			break;
+		}
 	}
 
 	prefetch = l2x0_saved_regs.prefetch_ctrl;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 903dba0..c274476 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -171,7 +171,7 @@
 	 */
 	if (sizeof(mask) != sizeof(dma_addr_t) &&
 	    mask > (dma_addr_t)~0 &&
-	    dma_to_pfn(dev, ~0) < max_pfn) {
+	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
 		if (warn) {
 			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
 				 mask);
@@ -1106,7 +1106,7 @@
 	int i = 0;
 
 	if (array_size <= PAGE_SIZE)
-		pages = kzalloc(array_size, gfp);
+		pages = kzalloc(array_size, GFP_KERNEL);
 	else
 		pages = vzalloc(array_size);
 	if (!pages)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index a982dc3..6333d9c 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -552,6 +552,7 @@
 
 	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		inf->name, fsr, addr);
+	show_pte(current->mm, addr);
 
 	info.si_signo = inf->sig;
 	info.si_errno = 0;
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 004e35c..cf30daf 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -49,7 +49,10 @@
 		WARN_ON_ONCE(1);
 	}
 
-	if (!is_module_address(start) || !is_module_address(end - 1))
+	if (start < MODULES_VADDR || start >= MODULES_END)
+		return -EINVAL;
+
+	if (end < MODULES_VADDR || start >= MODULES_END)
 		return -EINVAL;
 
 	data.set_mask = set_mask;
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index f1ad9c2..a857794 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -622,7 +622,7 @@
 		};
 
 		sgenet0: ethernet@1f210000 {
-			compatible = "apm,xgene-enet";
+			compatible = "apm,xgene1-sgenet";
 			status = "disabled";
 			reg = <0x0 0x1f210000 0x0 0xd100>,
 			      <0x0 0x1f200000 0x0 0Xc300>,
@@ -636,7 +636,7 @@
 		};
 
 		xgenet: ethernet@1f610000 {
-			compatible = "apm,xgene-enet";
+			compatible = "apm,xgene1-xgenet";
 			status = "disabled";
 			reg = <0x0 0x1f610000 0x0 0xd100>,
 			      <0x0 0x1f600000 0x0 0Xc300>,
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dts b/arch/arm64/boot/dts/arm/foundation-v8.dts
index 27f3296..4eac8dc 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dts
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dts
@@ -34,6 +34,7 @@
 			reg = <0x0 0x0>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@1 {
 			device_type = "cpu";
@@ -41,6 +42,7 @@
 			reg = <0x0 0x1>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@2 {
 			device_type = "cpu";
@@ -48,6 +50,7 @@
 			reg = <0x0 0x2>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@3 {
 			device_type = "cpu";
@@ -55,6 +58,11 @@
 			reg = <0x0 0x3>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
+		};
+
+		L2_0: l2-cache0 {
+			compatible = "cache";
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index d429129..133ee59 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -39,6 +39,7 @@
 			reg = <0x0 0x0>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A57_L2>;
 		};
 
 		A57_1: cpu@1 {
@@ -46,6 +47,7 @@
 			reg = <0x0 0x1>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A57_L2>;
 		};
 
 		A53_0: cpu@100 {
@@ -53,6 +55,7 @@
 			reg = <0x0 0x100>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A53_L2>;
 		};
 
 		A53_1: cpu@101 {
@@ -60,6 +63,7 @@
 			reg = <0x0 0x101>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A53_L2>;
 		};
 
 		A53_2: cpu@102 {
@@ -67,6 +71,7 @@
 			reg = <0x0 0x102>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A53_L2>;
 		};
 
 		A53_3: cpu@103 {
@@ -74,6 +79,15 @@
 			reg = <0x0 0x103>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A53_L2>;
+		};
+
+		A57_L2: l2-cache0 {
+			compatible = "cache";
+		};
+
+		A53_L2: l2-cache1 {
+			compatible = "cache";
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index efc59b3..20addab 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -37,6 +37,7 @@
 			reg = <0x0 0x0>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@1 {
 			device_type = "cpu";
@@ -44,6 +45,7 @@
 			reg = <0x0 0x1>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@2 {
 			device_type = "cpu";
@@ -51,6 +53,7 @@
 			reg = <0x0 0x2>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@3 {
 			device_type = "cpu";
@@ -58,6 +61,11 @@
 			reg = <0x0 0x3>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
+		};
+
+		L2_0: l2-cache0 {
+			compatible = "cache";
 		};
 	};
 
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 5720608..abb79b3 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -29,7 +29,7 @@
 obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
 aes-neon-blk-y := aes-glue-neon.o aes-neon.o
 
-AFLAGS_aes-ce.o		:= -DINTERLEAVE=2 -DINTERLEAVE_INLINE
+AFLAGS_aes-ce.o		:= -DINTERLEAVE=4
 AFLAGS_aes-neon.o	:= -DINTERLEAVE=4
 
 CFLAGS_aes-glue-ce.o	:= -DUSE_V8_CRYPTO_EXTENSIONS
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5901480..750bac4 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -20,6 +20,9 @@
 #error "Only include this from assembly code"
 #endif
 
+#ifndef __ASM_ASSEMBLER_H
+#define __ASM_ASSEMBLER_H
+
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
@@ -155,3 +158,5 @@
 #endif
 	orr	\rd, \lbits, \hbits, lsl #32
 	.endm
+
+#endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 0710654..c60643f 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -1,6 +1,8 @@
 #ifndef __ASM_CPUIDLE_H
 #define __ASM_CPUIDLE_H
 
+#include <asm/proc-fns.h>
+
 #ifdef CONFIG_CPU_IDLE
 extern int cpu_init_idle(unsigned int cpu);
 extern int cpu_suspend(unsigned long arg);
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index e2ff32a..d2f4942 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -264,8 +264,10 @@
 __AARCH64_INSN_FUNCS(bics,	0x7F200000, 0x6A200000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
-__AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
-__AARCH64_INSN_FUNCS(cbnz,	0xFE000000, 0x35000000)
+__AARCH64_INSN_FUNCS(cbz,	0x7F000000, 0x34000000)
+__AARCH64_INSN_FUNCS(cbnz,	0x7F000000, 0x35000000)
+__AARCH64_INSN_FUNCS(tbz,	0x7F000000, 0x36000000)
+__AARCH64_INSN_FUNCS(tbnz,	0x7F000000, 0x37000000)
 __AARCH64_INSN_FUNCS(bcond,	0xFF000010, 0x54000000)
 __AARCH64_INSN_FUNCS(svc,	0xFFE0001F, 0xD4000001)
 __AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
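
The old CBZ/CBNZ masks included bit 31, which in the A64 compare-and-branch encoding is the sf bit selecting a W or X register, so the helpers only matched the 32-bit forms; masking with 0x7F000000 ignores that bit and covers both (and the same shape fits TBZ/TBNZ, where bit 31 carries part of the bit number). A small user-space check of the mask/value pairs, using what I believe are the encodings of "cbz w0, ." and "cbz x0, .":

/* build: cc -o cbz_check cbz_check.c && ./cbz_check */
#include <stdint.h>
#include <stdio.h>

#define OLD_CBZ_MASK	0xFE000000u
#define NEW_CBZ_MASK	0x7F000000u
#define CBZ_VALUE	0x34000000u

static int matches(uint32_t insn, uint32_t mask, uint32_t value)
{
	return (insn & mask) == value;
}

int main(void)
{
	uint32_t cbz_w0 = 0x34000000;	/* cbz w0, .  (sf = 0) */
	uint32_t cbz_x0 = 0xb4000000;	/* cbz x0, .  (sf = 1) */

	printf("old mask: w-form %d, x-form %d\n",
	       matches(cbz_w0, OLD_CBZ_MASK, CBZ_VALUE),
	       matches(cbz_x0, OLD_CBZ_MASK, CBZ_VALUE));	/* 1, 0 */
	printf("new mask: w-form %d, x-form %d\n",
	       matches(cbz_w0, NEW_CBZ_MASK, CBZ_VALUE),
	       matches(cbz_x0, NEW_CBZ_MASK, CBZ_VALUE));	/* 1, 1 */
	return 0;
}
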
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 94674eb..54bb4ba 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -129,6 +129,9 @@
  * 40 bits wide (T0SZ = 24).  Systems with a PARange smaller than 40 bits are
  * not known to exist and will break with this configuration.
  *
+ * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time
+ * (see hyp-init.S).
+ *
  * Note that when using 4K pages, we concatenate two first level page tables
  * together.
  *
@@ -138,7 +141,6 @@
 #ifdef CONFIG_ARM64_64K_PAGES
 /*
  * Stage2 translation configuration:
- * 40bits output (PS = 2)
  * 40bits input  (T0SZ = 24)
  * 64kB pages (TG0 = 1)
  * 2 level page tables (SL = 1)
@@ -150,7 +152,6 @@
 #else
 /*
  * Stage2 translation configuration:
- * 40bits output (PS = 2)
  * 40bits input  (T0SZ = 24)
  * 4kB pages (TG0 = 0)
  * 3 level page tables (SL = 1)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6458b53..bbfb600 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -158,6 +158,8 @@
 #define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
 #define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
 
+#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+
 /*
  * If we are concatenating first level stage-2 page tables, we would have less
  * than or equal to 16 pointers in the fake PGD, because that's what the
@@ -171,43 +173,6 @@
 #define KVM_PREALLOC_LEVEL	(0)
 #endif
 
-/**
- * kvm_prealloc_hwpgd - allocate inital table for VTTBR
- * @kvm:	The KVM struct pointer for the VM.
- * @pgd:	The kernel pseudo pgd
- *
- * When the kernel uses more levels of page tables than the guest, we allocate
- * a fake PGD and pre-populate it to point to the next-level page table, which
- * will be the real initial page table pointed to by the VTTBR.
- *
- * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
- * the kernel will use folded pud.  When KVM_PREALLOC_LEVEL==1, we
- * allocate 2 consecutive PUD pages.
- */
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
-{
-	unsigned int i;
-	unsigned long hwpgd;
-
-	if (KVM_PREALLOC_LEVEL == 0)
-		return 0;
-
-	hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
-	if (!hwpgd)
-		return -ENOMEM;
-
-	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
-		if (KVM_PREALLOC_LEVEL == 1)
-			pgd_populate(NULL, pgd + i,
-				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
-		else if (KVM_PREALLOC_LEVEL == 2)
-			pud_populate(NULL, pud_offset(pgd, 0) + i,
-				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
-	}
-
-	return 0;
-}
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
 	pgd_t *pgd = kvm->arch.pgd;
@@ -224,12 +189,11 @@
 	return pmd_offset(pud, 0);
 }
 
-static inline void kvm_free_hwpgd(struct kvm *kvm)
+static inline unsigned int kvm_get_hwpgd_size(void)
 {
-	if (KVM_PREALLOC_LEVEL > 0) {
-		unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
-		free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
-	}
+	if (KVM_PREALLOC_LEVEL > 0)
+		return PTRS_PER_S2_PGD * PAGE_SIZE;
+	return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
 static inline bool kvm_page_empty(void *ptr)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 16449c5..800ec0e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -460,7 +460,7 @@
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 9a8fd84..941c375 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -39,7 +39,11 @@
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#define cpu_switch_mm(pgd,mm)				\
+do {							\
+	BUG_ON(pgd == swapper_pg_dir);			\
+	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
+} while (0)
 
 #define cpu_get_pgd()					\
 ({							\
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f9be30e..20e9591 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -45,7 +45,8 @@
 #define STACK_TOP		STACK_TOP_MAX
 #endif /* CONFIG_COMPAT */
 
-#define ARCH_LOW_ADDRESS_LIMIT	PHYS_MASK
+extern phys_addr_t arm64_dma_phys_limit;
+#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)
 #endif /* __KERNEL__ */
 
 struct debug_info {
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index c028fe3..53d9c354 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -48,6 +48,7 @@
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	pgtable_page_dtor(pte);
 	tlb_remove_entry(tlb, pte);
 }
@@ -56,6 +57,7 @@
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
@@ -64,6 +66,7 @@
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 73f0ce5..c3bb05b 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -24,11 +24,6 @@
 #include <linux/sched.h>
 #include <asm/cputype.h>
 
-extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
-extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
-
-extern struct cpu_tlb_fns cpu_tlb;
-
 /*
  *	TLB Management
  *	==============
@@ -149,6 +144,19 @@
 }
 
 /*
+ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
+ * table levels (pgd/pud/pmd).
+ */
+static inline void __flush_tlb_pgtable(struct mm_struct *mm,
+				       unsigned long uaddr)
+{
+	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
+
+	dsb(ishst);
+	asm("tlbi	vae1is, %0" : : "r" (addr));
+	dsb(ish);
+}
+/*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
 static inline void update_mmu_cache(struct vm_area_struct *vma,
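
__flush_tlb_pgtable() hand-builds the TLBI VAE1IS operand: shifting the address right by 12 puts VA[55:12] into bits [43:0], and the ASID is placed in bits [63:48] (my reading of the ARMv8 by-VA TLBI operand format). A user-space sketch of the packing with a hypothetical user address and ASID:

/* build: cc -o tlbi_arg tlbi_arg.c && ./tlbi_arg */
#include <stdint.h>
#include <stdio.h>

/* Pack a VA and ASID as __flush_tlb_pgtable() does for TLBI VAE1IS. */
static uint64_t tlbi_vae1is_arg(uint64_t vaddr, uint16_t asid)
{
	return (vaddr >> 12) | ((uint64_t)asid << 48);
}

int main(void)
{
	uint64_t arg = tlbi_vae1is_arg(0x00007f1234567000ULL, 0x42);

	printf("operand    = %#018llx\n", (unsigned long long)arg);
	printf("asid field = %#llx\n", (unsigned long long)(arg >> 48));
	printf("va[55:12]  = %#llx\n",
	       (unsigned long long)(arg & ((1ULL << 44) - 1)));
	return 0;
}
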
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index bef04af..5ee07ee 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -15,8 +15,9 @@
 arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
-			   hyp-stub.o psci.o cpu_ops.o insn.o return_address.o	\
-			   cpuinfo.o cpu_errata.o alternative.o cacheinfo.o
+			   hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o	\
+			   return_address.o cpuinfo.o cpu_errata.o		\
+			   alternative.o cacheinfo.o
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o 	\
 					   sys_compat.o entry32.o		\
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index b42c7b4..ab21e0d 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -337,7 +337,11 @@
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	cpu_switch_mm(mm->pgd, mm);
+	if (mm == &init_mm)
+		cpu_set_reserved_ttbr0();
+	else
+		cpu_switch_mm(mm->pgd, mm);
+
 	flush_tlb_all();
 	if (icache_is_aivivt())
 		__flush_icache_all();
@@ -354,3 +358,12 @@
 	efi_set_pgd(current->active_mm);
 	preempt_enable();
 }
+
+/*
+ * UpdateCapsule() depends on the system being shutdown via
+ * ResetSystem().
+ */
+bool efi_poweroff_required(void)
+{
+	return efi_enabled(EFI_RUNTIME_SERVICES);
+}
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index cf8556a..c851be7 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -156,7 +156,7 @@
 
 	branch = aarch64_insn_gen_branch_imm(pc,
 					     (unsigned long)ftrace_graph_caller,
-					     AARCH64_INSN_BRANCH_LINK);
+					     AARCH64_INSN_BRANCH_NOLINK);
 	nop = aarch64_insn_gen_nop();
 
 	if (enable)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 8ce88e0..07f9305 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -585,8 +585,8 @@
  * zeroing of .bss would clobber it.
  */
 	.pushsection	.data..cacheline_aligned
-ENTRY(__boot_cpu_mode)
 	.align	L1_CACHE_SHIFT
+ENTRY(__boot_cpu_mode)
 	.long	BOOT_CPU_MODE_EL2
 	.long	0
 	.popsection
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 27d4864..c8eca88 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -87,8 +87,10 @@
 
 	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
 		page = vmalloc_to_page(addr);
-	else
+	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
 		page = virt_to_page(addr);
+	else
+		return addr;
 
 	BUG_ON(!page);
 	set_fixmap(fixmap, page_to_phys(page));
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index fde9923..c6b1f3b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -21,6 +21,7 @@
 #include <stdarg.h>
 
 #include <linux/compat.h>
+#include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -150,6 +151,13 @@
 	local_irq_disable();
 	smp_send_stop();
 
+	/*
+	 * UpdateCapsule() depends on the system being reset via
+	 * ResetSystem().
+	 */
+	if (efi_enabled(EFI_RUNTIME_SERVICES))
+		efi_reboot(reboot_mode, NULL);
+
 	/* Now call the architecture specific reboot code. */
 	if (arm_pm_restart)
 		arm_pm_restart(reboot_mode, cmd);
diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S
new file mode 100644
index 0000000..cf83e61
--- /dev/null
+++ b/arch/arm64/kernel/psci-call.S
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+
+/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+ENTRY(__invoke_psci_fn_hvc)
+	hvc	#0
+	ret
+ENDPROC(__invoke_psci_fn_hvc)
+
+/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+ENTRY(__invoke_psci_fn_smc)
+	smc	#0
+	ret
+ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 3425f31..9b8a70a 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -57,6 +57,9 @@
 static int (*invoke_psci_fn)(u64, u64, u64, u64);
 typedef int (*psci_initcall_t)(const struct device_node *);
 
+asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64);
+asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+
 enum psci_function {
 	PSCI_FN_CPU_SUSPEND,
 	PSCI_FN_CPU_ON,
@@ -109,40 +112,6 @@
 			PSCI_0_2_POWER_STATE_AFFL_SHIFT;
 }
 
-/*
- * The following two functions are invoked via the invoke_psci_fn pointer
- * and will not be inlined, allowing us to piggyback on the AAPCS.
- */
-static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1,
-					 u64 arg2)
-{
-	asm volatile(
-			__asmeq("%0", "x0")
-			__asmeq("%1", "x1")
-			__asmeq("%2", "x2")
-			__asmeq("%3", "x3")
-			"hvc	#0\n"
-		: "+r" (function_id)
-		: "r" (arg0), "r" (arg1), "r" (arg2));
-
-	return function_id;
-}
-
-static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
-					 u64 arg2)
-{
-	asm volatile(
-			__asmeq("%0", "x0")
-			__asmeq("%1", "x1")
-			__asmeq("%2", "x2")
-			__asmeq("%3", "x3")
-			"smc	#0\n"
-		: "+r" (function_id)
-		: "r" (arg0), "r" (arg1), "r" (arg2));
-
-	return function_id;
-}
-
 static int psci_get_version(void)
 {
 	int err;
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index c20a300..d26fcd4 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -154,8 +154,7 @@
 	case __SI_TIMER:
 		 err |= __put_user(from->si_tid, &to->si_tid);
 		 err |= __put_user(from->si_overrun, &to->si_overrun);
-		 err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
-				   &to->si_ptr);
+		 err |= __put_user(from->si_int, &to->si_int);
 		break;
 	case __SI_POLL:
 		err |= __put_user(from->si_band, &to->si_band);
@@ -184,7 +183,7 @@
 	case __SI_MESGQ: /* But this is */
 		err |= __put_user(from->si_pid, &to->si_pid);
 		err |= __put_user(from->si_uid, &to->si_uid);
-		err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
+		err |= __put_user(from->si_int, &to->si_int);
 		break;
 	case __SI_SYS:
 		err |= __put_user((compat_uptr_t)(unsigned long)
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index fe652ff..efa79e8 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -174,8 +174,6 @@
 /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
 ENTRY(__kernel_clock_getres)
 	.cfi_startproc
-	cbz	w1, 3f
-
 	cmp	w0, #CLOCK_REALTIME
 	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
 	b.ne	1f
@@ -188,6 +186,7 @@
 	b.ne	4f
 	ldr	x2, 6f
 2:
+	cbz	w1, 3f
 	stp	xzr, x2, [x1]
 
 3:	/* res == NULL. */
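
Hoisting the clock-id checks above the res == NULL short-circuit means clock_getres() now rejects an unsupported clock even when the caller passes a NULL res, which POSIX allows as a way to probe whether a clock id is valid. A user-space illustration of the semantics the fixed vDSO path should give:

/* build: cc -o getres getres.c && ./getres */
#include <stdio.h>
#include <time.h>
#include <errno.h>

static void probe(clockid_t id, const char *name)
{
	/* res may be NULL: we only care whether the clock id is valid. */
	if (clock_getres(id, NULL) == 0)
		printf("%s: supported\n", name);
	else
		printf("%s: not supported (errno=%d)\n", name, errno);
}

int main(void)
{
	probe(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
	probe((clockid_t)12345, "bogus clock id");	/* expect EINVAL */
	return 0;
}
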
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0a24b9b..ef7d112 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -51,7 +51,7 @@
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 {
 	unsigned long val;
 	void *ptr = NULL;
@@ -67,6 +67,8 @@
 
 		*ret_page = phys_to_page(phys);
 		ptr = (void *)val;
+		if (flags & __GFP_ZERO)
+			memset(ptr, 0, size);
 	}
 
 	return ptr;
@@ -101,6 +103,7 @@
 		flags |= GFP_DMA;
 	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
+		void *addr;
 
 		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@
 			return NULL;
 
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return page_address(page);
+		addr = page_address(page);
+		if (flags & __GFP_ZERO)
+			memset(addr, 0, size);
+		return addr;
 	} else {
 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
 	}
@@ -146,7 +152,7 @@
 
 	if (!coherent && !(flags & __GFP_WAIT)) {
 		struct page *page = NULL;
-		void *addr = __alloc_from_pool(size, &page);
+		void *addr = __alloc_from_pool(size, &page, flags);
 
 		if (addr)
 			*dma_handle = phys_to_dma(dev, page_to_phys(page));
@@ -348,8 +354,6 @@
 	.mapping_error = swiotlb_dma_mapping_error,
 };
 
-extern int swiotlb_late_init_with_default_size(size_t default_size);
-
 static int __init atomic_pool_init(void)
 {
 	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
@@ -411,21 +415,13 @@
 	return -ENOMEM;
 }
 
-static int __init swiotlb_late_init(void)
+static int __init arm64_dma_init(void)
 {
-	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+	int ret;
 
 	dma_ops = &swiotlb_dma_ops;
 
-	return swiotlb_late_init_with_default_size(swiotlb_size);
-}
-
-static int __init arm64_dma_init(void)
-{
-	int ret = 0;
-
-	ret |= swiotlb_late_init();
-	ret |= atomic_pool_init();
+	ret = atomic_pool_init();
 
 	return ret;
 }
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 71145f9..ae85da6 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -33,6 +33,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/efi.h>
+#include <linux/swiotlb.h>
 
 #include <asm/fixmap.h>
 #include <asm/memory.h>
@@ -45,6 +46,7 @@
 #include "mm.h"
 
 phys_addr_t memstart_addr __read_mostly = 0;
+phys_addr_t arm64_dma_phys_limit __read_mostly;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 static int __init early_initrd(char *p)
@@ -85,7 +87,7 @@
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-		max_dma = PFN_DOWN(max_zone_dma_phys());
+		max_dma = PFN_DOWN(arm64_dma_phys_limit);
 		zone_size[ZONE_DMA] = max_dma - min;
 	}
 	zone_size[ZONE_NORMAL] = max - max_dma;
@@ -156,8 +158,6 @@
 
 void __init arm64_memblock_init(void)
 {
-	phys_addr_t dma_phys_limit = 0;
-
 	memblock_enforce_memory_limit(memory_limit);
 
 	/*
@@ -174,8 +174,10 @@
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		dma_phys_limit = max_zone_dma_phys();
-	dma_contiguous_reserve(dma_phys_limit);
+		arm64_dma_phys_limit = max_zone_dma_phys();
+	else
+		arm64_dma_phys_limit = PHYS_MASK + 1;
+	dma_contiguous_reserve(arm64_dma_phys_limit);
 
 	memblock_allow_resize();
 	memblock_dump_all();
@@ -276,6 +278,8 @@
  */
 void __init mem_init(void)
 {
+	swiotlb_init(1);
+
 	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index bb0ea94..1d3ec3d 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -51,7 +51,10 @@
 		WARN_ON_ONCE(1);
 	}
 
-	if (!is_module_address(start) || !is_module_address(end - 1))
+	if (start < MODULES_VADDR || start >= MODULES_END)
+		return -EINVAL;
+
+	if (end < MODULES_VADDR || end >= MODULES_END)
 		return -EINVAL;
 
 	data.set_mask = set_mask;
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index 9501bd8..68f2a8a 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -666,7 +666,14 @@
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
+
+static const u16 per_cnt[] = {
+	P_CNT_CUD,
+	P_CNT_CDG,
+	P_CNT_CZM,
+	0
+};
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
 	/*.rotary_up_key     = KEY_UP,*/
@@ -676,10 +683,16 @@
 	.debounce	   = 10,	/* 0..17 */
 	.mode		   = ROT_QUAD_ENC | ROT_DEBE,
 	.pm_wakeup	   = 1,
+	.pin_list	   = per_cnt,
 };
 
 static struct resource bfin_rotary_resources[] = {
 	{
+		.start = CNT_CONFIG,
+		.end   = CNT_CONFIG + 0xff,
+		.flags = IORESOURCE_MEM,
+	},
+	{
 		.start = IRQ_CNT,
 		.end = IRQ_CNT,
 		.flags = IORESOURCE_IRQ,
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index d64f565..d4219e8 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -1092,7 +1092,14 @@
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
+
+static const u16 per_cnt[] = {
+	P_CNT_CUD,
+	P_CNT_CDG,
+	P_CNT_CZM,
+	0
+};
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
 	/*.rotary_up_key     = KEY_UP,*/
@@ -1102,10 +1109,16 @@
 	.debounce	   = 10,	/* 0..17 */
 	.mode		   = ROT_QUAD_ENC | ROT_DEBE,
 	.pm_wakeup	   = 1,
+	.pin_list	   = per_cnt,
 };
 
 static struct resource bfin_rotary_resources[] = {
 	{
+		.start = CNT_CONFIG,
+		.end   = CNT_CONFIG + 0xff,
+		.flags = IORESOURCE_MEM,
+	},
+	{
 		.start = IRQ_CNT,
 		.end = IRQ_CNT,
 		.flags = IORESOURCE_IRQ,
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 1fe7ff2..4204b98 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -159,7 +159,7 @@
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
 	/*.rotary_up_key     = KEY_UP,*/
@@ -173,6 +173,11 @@
 
 static struct resource bfin_rotary_resources[] = {
 	{
+		.start = CNT_CONFIG,
+		.end   = CNT_CONFIG + 0xff,
+		.flags = IORESOURCE_MEM,
+	},
+	{
 		.start = IRQ_CNT,
 		.end = IRQ_CNT,
 		.flags = IORESOURCE_IRQ,
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index e2c0b02..7f9fc27 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -75,7 +75,7 @@
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
 	/*.rotary_up_key     = KEY_UP,*/
@@ -88,6 +88,11 @@
 
 static struct resource bfin_rotary_resources[] = {
 	{
+		.start = CNT_CONFIG,
+		.end   = CNT_CONFIG + 0xff,
+		.flags = IORESOURCE_MEM,
+	},
+	{
 		.start = IRQ_CNT,
 		.end = IRQ_CNT,
 		.flags = IORESOURCE_IRQ,
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
index 78d4483b..ec4db6d 100644
--- a/arch/c6x/include/asm/pgtable.h
+++ b/arch/c6x/include/asm/pgtable.h
@@ -67,6 +67,11 @@
  */
 #define pgtable_cache_init()   do { } while (0)
 
+/*
+ * c6x is !MMU, so define the simplest implementation
+ */
+#define pgprot_writecombine pgprot_noncached
+
 #include <asm-generic/pgtable.h>
 
 #endif /* _ASM_C6X_PGTABLE_H */
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index 93bcf2a..07d7a7e 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -123,12 +123,14 @@
 #define PGDIR_MASK		(~(PGDIR_SIZE - 1))
 #define PTRS_PER_PGD		64
 
+#define __PAGETABLE_PUD_FOLDED
 #define PUD_SHIFT		26
 #define PTRS_PER_PUD		1
 #define PUD_SIZE		(1UL << PUD_SHIFT)
 #define PUD_MASK		(~(PUD_SIZE - 1))
 #define PUE_SIZE		256
 
+#define __PAGETABLE_PMD_FOLDED
 #define PMD_SHIFT		26
 #define PMD_SIZE		(1UL << PMD_SHIFT)
 #define PMD_MASK		(~(PMD_SIZE - 1))
diff --git a/arch/m32r/include/asm/pgtable-2level.h b/arch/m32r/include/asm/pgtable-2level.h
index 8fd8ee7..421e6ba 100644
--- a/arch/m32r/include/asm/pgtable-2level.h
+++ b/arch/m32r/include/asm/pgtable-2level.h
@@ -13,6 +13,7 @@
  * the M32R is two-level, so we don't really have any
  * PMD directory physically.
  */
+#define __PAGETABLE_PMD_FOLDED
 #define PMD_SHIFT	22
 #define PTRS_PER_PMD	1
 
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 28a145b..35ed4a9 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -54,10 +54,12 @@
  */
 #ifdef CONFIG_SUN3
 #define PTRS_PER_PTE   16
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   2048
 #elif defined(CONFIG_COLDFIRE)
 #define PTRS_PER_PTE	512
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	1024
 #else
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index 881071c..13272fd 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -149,8 +149,8 @@
 
 unsigned long get_wchan(struct task_struct *p);
 
-#define	KSTK_EIP(tsk)	((tsk)->thread.kernel_context->CurrPC)
-#define	KSTK_ESP(tsk)	((tsk)->thread.kernel_context->AX[0].U0)
+#define	KSTK_EIP(tsk)	(task_pt_regs(tsk)->ctx.CurrPC)
+#define	KSTK_ESP(tsk)	(task_pt_regs(tsk)->ctx.AX[0].U0)
 
 #define user_stack_pointer(regs)        ((regs)->ctx.AX[0].U0)
 
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 0536bc0..ef54851 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -348,8 +348,9 @@
  * The LP register should point to the location where the called function
  * should return.  [note that MAKE_SYS_CALL uses label 1] */
 	/* See if the system call number is valid */
+	blti	r12, 5f
 	addi	r11, r12, -__NR_syscalls;
-	bgei	r11,5f;
+	bgei	r11, 5f;
 	/* Figure out which function to use for this system call.  */
 	/* Note Microblaze barrel shift is optional, so don't rely on it */
 	add	r12, r12, r12;			/* convert num -> ptr */
@@ -375,7 +376,7 @@
 
 	/* The syscall number is invalid, return an error.  */
 5:
-	rtsd	r15, 8;		/* looks like a normal subroutine return */
+	braid	ret_from_trap
 	addi	r3, r0, -ENOSYS;
 
 /* Entry point used to return from a syscall/trap */
@@ -411,7 +412,7 @@
 	bri	1b
 
 	/* Maybe handle a signal */
-5:	
+5:
 	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
 	beqi	r11, 4f;		/* Signals to handle, handle them */
 
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 843713c..c7a1690 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -54,6 +54,7 @@
 	select CPU_PM if CPU_IDLE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_BINFMT_ELF_STATE
+	select SYSCTL_EXCEPTION_TRACE
 
 menu "Machine selection"
 
@@ -376,8 +377,10 @@
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
 	select SYS_HAS_CPU_MIPS32_R3_5
+	select SYS_HAS_CPU_MIPS32_R6
 	select SYS_HAS_CPU_MIPS64_R1
 	select SYS_HAS_CPU_MIPS64_R2
+	select SYS_HAS_CPU_MIPS64_R6
 	select SYS_HAS_CPU_NEVADA
 	select SYS_HAS_CPU_RM7000
 	select SYS_SUPPORTS_32BIT_KERNEL
@@ -1033,6 +1036,9 @@
 config NO_IOPORT_MAP
 	def_bool n
 
+config GENERIC_CSUM
+	bool
+
 config GENERIC_ISA_DMA
 	bool
 	select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
@@ -1146,6 +1152,9 @@
 	bool
 	select SOC_PNX833X
 
+config MIPS_SPRAM
+	bool
+
 config SWAP_IO_SPACE
 	bool
 
@@ -1304,6 +1313,22 @@
 	  specific type of processor in your system, choose that one;
 	  otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
 
+config CPU_MIPS32_R6
+	bool "MIPS32 Release 6 (EXPERIMENTAL)"
+	depends on SYS_HAS_CPU_MIPS32_R6
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
+	select GENERIC_CSUM
+	select HAVE_KVM
+	select MIPS_O32_FP64_SUPPORT
+	help
+	  Choose this option to build a kernel for release 6 or later of the
+	  MIPS32 architecture.  New MIPS processors, starting with the Warrior
+	  family, are based on a MIPS32r6 processor. If you own an older
+	  processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
+
 config CPU_MIPS64_R1
 	bool "MIPS64 Release 1"
 	depends on SYS_HAS_CPU_MIPS64_R1
@@ -1339,6 +1364,21 @@
 	  specific type of processor in your system, choose that one;
 	  otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
+config CPU_MIPS64_R6
+	bool "MIPS64 Release 6 (EXPERIMENTAL)"
+	depends on SYS_HAS_CPU_MIPS64_R6
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select CPU_SUPPORTS_64BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
+	select GENERIC_CSUM
+	help
+	  Choose this option to build a kernel for release 6 or later of the
+	  MIPS64 architecture.  New MIPS processors, starting with the Warrior
+	  family, are based on a MIPS64r6 processor. If you own an older
+	  processor, you probably need to select MIPS64r1 or MIPS64r2 instead.
+
 config CPU_R3000
 	bool "R3000"
 	depends on SYS_HAS_CPU_R3000
@@ -1539,7 +1579,7 @@
 config CPU_MIPS32_3_5_FEATURES
 	bool "MIPS32 Release 3.5 Features"
 	depends on SYS_HAS_CPU_MIPS32_R3_5
-	depends on CPU_MIPS32_R2
+	depends on CPU_MIPS32_R2 || CPU_MIPS32_R6
 	help
 	  Choose this option to build a kernel for release 2 or later of the
 	  MIPS32 architecture including features from the 3.5 release such as
@@ -1659,12 +1699,18 @@
 config SYS_HAS_CPU_MIPS32_R3_5
 	bool
 
+config SYS_HAS_CPU_MIPS32_R6
+	bool
+
 config SYS_HAS_CPU_MIPS64_R1
 	bool
 
 config SYS_HAS_CPU_MIPS64_R2
 	bool
 
+config SYS_HAS_CPU_MIPS64_R6
+	bool
+
 config SYS_HAS_CPU_R3000
 	bool
 
@@ -1764,11 +1810,11 @@
 #
 config CPU_MIPS32
 	bool
-	default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+	default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
 
 config CPU_MIPS64
 	bool
-	default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+	default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
 
 #
 # These two indicate the revision of the architecture, either Release 1 or Release 2
@@ -1780,6 +1826,12 @@
 config CPU_MIPSR2
 	bool
 	default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+	select MIPS_SPRAM
+
+config CPU_MIPSR6
+	bool
+	default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+	select MIPS_SPRAM
 
 config EVA
 	bool
@@ -2013,6 +2065,19 @@
 	default y
 	depends on MIPS_MT_SMP
 
+config MIPSR2_TO_R6_EMULATOR
+	bool "MIPS R2-to-R6 emulator"
+	depends on CPU_MIPSR6 && !SMP
+	default y
+	help
+	  Choose this option if you want to run non-R6 MIPS userland code.
+	  Even if you say 'Y' here, the emulator will still be disabled by
+	  default. You can enable it using the 'mipsr2emul' kernel option.
+	  The only reason this is a build-time option is to save ~14K from the
+	  final kernel image.
+comment "MIPS R2-to-R6 emulator is only available for UP kernels"
+	depends on SMP && CPU_MIPSR6
+
 config MIPS_VPE_LOADER
 	bool "VPE loader support."
 	depends on SYS_SUPPORTS_MULTITHREADING && MODULES
@@ -2148,7 +2213,7 @@
 	  here.
 
 config CPU_MICROMIPS
-	depends on 32BIT && SYS_SUPPORTS_MICROMIPS
+	depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6
 	bool "microMIPS"
 	help
 	  When this option is enabled the kernel will be built using the
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 88a9f43..3a2b775 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -122,17 +122,4 @@
 	help
 	  Add several files to the debugfs to test spinlock speed.
 
-config FP32XX_HYBRID_FPRS
-	bool "Run FP32 & FPXX code with hybrid FPRs"
-	depends on MIPS_O32_FP64_SUPPORT
-	help
-	  The hybrid FPR scheme is normally used only when a program needs to
-	  execute a mix of FP32 & FP64A code, since the trapping & emulation
-	  that it entails is expensive. When enabled, this option will lead
-	  to the kernel running programs which use the FP32 & FPXX FP ABIs
-	  using the hybrid FPR scheme, which can be useful for debugging
-	  purposes.
-
-	  If unsure, say N.
-
 endmenu
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 2563a08..8f57fc7 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -122,26 +122,8 @@
 cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
-# For smartmips configurations, there are hundreds of warnings due to ISA overrides
-# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
-# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
-# similar directives in the kernel will spam the build logs with the following warnings:
-# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
-# or
-# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
-# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
-# been fixed properly.
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS)	+= $(call cc-option,-msmartmips) -Wa,--no-warn
-cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
-
 cflags-$(CONFIG_SB1XXX_CORELIS)	+= $(call cc-option,-mno-sched-prolog) \
 				   -fno-omit-frame-pointer
-
-ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa	:= $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa)
-cflags-$(toolchain-msa)		+= -DTOOLCHAIN_SUPPORTS_MSA
-endif
-
 #
 # CPU-dependent compiler/assembler options for optimization.
 #
@@ -156,10 +138,12 @@
 			-Wa,-mips32 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R2)	+= $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
 			-Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6)	+= -march=mips32r6 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R1)	+= $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
 			-Wa,-mips64 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R2)	+= $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
 			-Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6)	+= -march=mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000)	+= -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432)	+= $(call cc-option,-march=r5400,-march=r5000) \
 			-Wa,--trap
@@ -182,6 +166,16 @@
 endif
 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
 cflags-$(CONFIG_CPU_BMIPS)	+= -march=mips32 -Wa,-mips32 -Wa,--trap
+#
+# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
+# as MIPS64 R1; older versions as just R1.  This leaves the possibility open
+# that GCC might generate R2 code for -march=loongson3a which then is rejected
+# by GAS.  The cc-option can't probe for this behaviour so -march=loongson3a
+# can't easily be used safely within the kbuild framework.
+#
+cflags-$(CONFIG_CPU_LOONGSON3)  +=					\
+	$(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+	-Wa,-mips64r2 -Wa,--trap
 
 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS)	+= $(call cc-option,-mfix-r4000,)
 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS)	+= $(call cc-option,-mfix-r4400,)
@@ -194,6 +188,23 @@
 endif
 endif
 
+# For smartmips configurations, there are hundreds of warnings due to ISA overrides
+# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
+# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
+# similar directives in the kernel will spam the build logs with the following warnings:
+# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
+# or
+# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
+# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
+# been fixed properly.
+mips-cflags				:= "$(cflags-y)"
+cflags-$(CONFIG_CPU_HAS_SMARTMIPS)	+= $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
+cflags-$(CONFIG_CPU_MICROMIPS)		+= $(call cc-option,$(mips-cflags),-mmicromips)
+ifeq ($(CONFIG_CPU_HAS_MSA),y)
+toolchain-msa				:= $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
+cflags-$(toolchain-msa)			+= -DTOOLCHAIN_SUPPORTS_MSA
+endif
+
 #
 # Firmware support
 #
@@ -287,7 +298,11 @@
 boot-y			+= vmlinux.srec
 ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0)
 boot-y			+= uImage
+boot-y			+= uImage.bin
+boot-y			+= uImage.bz2
 boot-y			+= uImage.gz
+boot-y			+= uImage.lzma
+boot-y			+= uImage.lzo
 endif
 
 # compressed boot image targets (arch/mips/boot/compressed/)
@@ -386,7 +401,11 @@
 	echo '  vmlinuz.bin          - Raw binary zboot image'
 	echo '  vmlinuz.srec         - SREC zboot image'
 	echo '  uImage               - U-Boot image'
+	echo '  uImage.bin           - U-Boot image (uncompressed)'
+	echo '  uImage.bz2           - U-Boot image (bz2)'
 	echo '  uImage.gz            - U-Boot image (gzip)'
+	echo '  uImage.lzma          - U-Boot image (lzma)'
+	echo '  uImage.lzo           - U-Boot image (lzo)'
 	echo '  dtbs                 - Device-tree blobs for enabled boards'
 	echo
 	echo '  These will be default as appropriate for a configured platform.'
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index 48a9dfc..6a98d2c 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -127,12 +127,20 @@
 		t = 396000000;
 	else {
 		t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
+		if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
+			t &= 0x3f;
 		t *= parent_rate;
 	}
 
 	return t;
 }
 
+void __init alchemy_set_lpj(void)
+{
+	preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
+	preset_lpj /= 2 * HZ;
+}
+
 static struct clk_ops alchemy_clkops_cpu = {
 	.recalc_rate	= alchemy_clk_cpu_recalc,
 };
@@ -315,17 +323,26 @@
 
 /* lrclk: external synchronous static bus clock ***********************/
 
-static struct clk __init *alchemy_clk_setup_lrclk(const char *pn)
+static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
 {
-	/* MEM_STCFG0[15:13] = divisor.
+	/* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
+	 * otherwise lrclk=pclk/4.
+	 * All other variants: MEM_STCFG0[15:13] = divisor.
 	 * L/RCLK = periph_clk / (divisor + 1)
 	 * On Au1000, Au1500, Au1100 it's called LCLK,
 	 * on later models it's called RCLK, but it's the same thing.
 	 */
 	struct clk *c;
-	unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13;
+	unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
 
-	v = (v & 7) + 1;
+	switch (t) {
+	case ALCHEMY_CPU_AU1000:
+	case ALCHEMY_CPU_AU1500:
+		v = 4 + ((v >> 11) & 1);
+		break;
+	default:	/* all other models */
+		v = ((v >> 13) & 7) + 1;
+	}
 	c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
 				      pn, 0, 1, v);
 	if (!IS_ERR(c))
@@ -546,6 +563,8 @@
 }
 
 static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate,
+					unsigned long min_rate,
+					unsigned long max_rate,
 					unsigned long *best_parent_rate,
 					struct clk_hw **best_parent_clk)
 {
@@ -678,6 +697,8 @@
 }
 
 static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate,
+					unsigned long min_rate,
+					unsigned long max_rate,
 					unsigned long *best_parent_rate,
 					struct clk_hw **best_parent_clk)
 {
@@ -897,6 +918,8 @@
 }
 
 static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate,
+					unsigned long min_rate,
+					unsigned long max_rate,
 					unsigned long *best_parent_rate,
 					struct clk_hw **best_parent_clk)
 {
@@ -1060,7 +1083,7 @@
 	ERRCK(c)
 
 	/* L/RCLK: external static bus clock for synchronous mode */
-	c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK);
+	c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
 	ERRCK(c)
 
 	/* Frequency dividers 0-5 */
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 4e72daf..2902138 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -34,10 +34,12 @@
 #include <au1000.h>
 
 extern void __init board_setup(void);
-extern void set_cpuspec(void);
+extern void __init alchemy_set_lpj(void);
 
 void __init plat_mem_setup(void)
 {
+	alchemy_set_lpj();
+
 	if (au1xxx_cpu_needs_config_od())
 		/* Various early Au1xx0 errata corrected by this */
 		set_c0_config(1 << 19); /* Set Config[OD] */
diff --git a/arch/mips/bcm3384/irq.c b/arch/mips/bcm3384/irq.c
index 0fb5134..fd94fe8 100644
--- a/arch/mips/bcm3384/irq.c
+++ b/arch/mips/bcm3384/irq.c
@@ -180,7 +180,7 @@
 
 static struct of_device_id of_irq_ids[] __initdata = {
 	{ .compatible = "mti,cpu-interrupt-controller",
-	  .data = mips_cpu_intc_init },
+	  .data = mips_cpu_irq_of_init },
 	{ .compatible = "brcm,bcm3384-intc",
 	  .data = intc_of_init },
 	{},
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 1466c00..acb1988 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -23,6 +23,12 @@
 
 hostprogs-y := elf2ecoff
 
+suffix-y			:= bin
+suffix-$(CONFIG_KERNEL_BZIP2)	:= bz2
+suffix-$(CONFIG_KERNEL_GZIP)	:= gz
+suffix-$(CONFIG_KERNEL_LZMA)	:= lzma
+suffix-$(CONFIG_KERNEL_LZO)	:= lzo
+
 targets := vmlinux.ecoff
 quiet_cmd_ecoff = ECOFF	  $@
       cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
@@ -44,14 +50,53 @@
 UIMAGE_LOADADDR  = $(VMLINUX_LOAD_ADDRESS)
 UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS)
 
+#
+# Compressed vmlinux images
+#
+
+extra-y += vmlinux.bin.bz2
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
+extra-y += vmlinux.bin.lzo
+
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,bzip2)
+
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
 
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzma)
+
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzo)
+
+#
+# Compressed u-boot images
+#
+
+targets += uImage
+targets += uImage.bin
+targets += uImage.bz2
 targets += uImage.gz
+targets += uImage.lzma
+targets += uImage.lzo
+
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,uimage,none)
+
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
+	$(call if_changed,uimage,bzip2)
+
 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
 	$(call if_changed,uimage,gzip)
 
-targets += uImage
-$(obj)/uImage: $(obj)/uImage.gz FORCE
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+	$(call if_changed,uimage,lzma)
+
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
+	$(call if_changed,uimage,lzo)
+
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
 	@ln -sf $(notdir $<) $@
 	@echo '  Image $@ is ready'
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index 2a4c52e..266c813 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -268,7 +268,6 @@
 	Elf32_Ehdr ex;
 	Elf32_Phdr *ph;
 	Elf32_Shdr *sh;
-	char *shstrtab;
 	int i, pad;
 	struct sect text, data, bss;
 	struct filehdr efh;
@@ -336,9 +335,6 @@
 				     "sh");
 	if (must_convert_endian)
 		convert_elf_shdrs(sh, ex.e_shnum);
-	/* Read in the section string table. */
-	shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset,
-			    sh[ex.e_shstrndx].sh_size, "shstrtab");
 
 	/* Figure out if we can cram the program header into an ECOFF
 	   header...  Basically, we can't handle anything but loadable
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index b752c4e..1882e64 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -18,7 +18,7 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>
-
+#include <asm/octeon/cvmx-rst-defs.h>
 
 static u64 f;
 static u64 rdiv;
@@ -39,11 +39,20 @@
 
 	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
 		union cvmx_mio_rst_boot rst_boot;
+
 		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
 		rdiv = rst_boot.s.c_mul;	/* CPU clock */
 		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
 		f = (0x8000000000000000ull / sdiv) * 2;
+	} else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
+		union cvmx_rst_boot rst_boot;
+
+		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+		rdiv = rst_boot.s.c_mul;	/* CPU clock */
+		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
+		f = (0x8000000000000000ull / sdiv) * 2;
 	}
+
 }
 
 /*
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 3778655..7d89878 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -276,7 +276,7 @@
 			continue;
 
 		/* These addresses map low for PCI. */
-		if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX))
+		if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
 			continue;
 
 		addr_size += e->size;
@@ -308,7 +308,7 @@
 #endif
 #ifdef CONFIG_USB_OCTEON_OHCI
 	/* OCTEON II ohci is only 32-bit. */
-	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul)
+	if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
 		swiotlbsize = 64 * (1<<20);
 #endif
 	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 5dfef84..9eb0fee 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -767,7 +767,7 @@
 		break;
 	}
 	/* Most boards except NIC10e use a 12MHz crystal */
-	if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+	if (OCTEON_IS_OCTEON2())
 		return USB_CLOCK_TYPE_CRYSTAL_12;
 	return USB_CLOCK_TYPE_REF_48;
 }
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 2bc4aa9..10f7625 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -3,12 +3,14 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2012 Cavium, Inc.
+ * Copyright (C) 2004-2014 Cavium, Inc.
  */
 
+#include <linux/of_address.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/bitops.h>
+#include <linux/of_irq.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
@@ -22,16 +24,25 @@
 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
 static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
 
+struct octeon_irq_ciu_domain_data {
+	int num_sum;  /* number of sum registers (2 or 3). */
+};
+
 static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
 
-union octeon_ciu_chip_data {
-	void *p;
-	unsigned long l;
-	struct {
-		unsigned long line:6;
-		unsigned long bit:6;
-		unsigned long gpio_line:6;
-	} s;
+struct octeon_ciu_chip_data {
+	union {
+		struct {		/* only used for ciu3 */
+			u64 ciu3_addr;
+			unsigned int intsn;
+		};
+		struct {		/* only used for ciu/ciu2 */
+			u8 line;
+			u8 bit;
+			u8 gpio_line;
+		};
+	};
+	int current_cpu;	/* Next CPU expected to take this irq */
 };
 
 struct octeon_core_chip_data {
@@ -45,27 +56,40 @@
 
 static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
 
-static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
-				       struct irq_chip *chip,
-				       irq_flow_handler_t handler)
+static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
+				      struct irq_chip *chip,
+				      irq_flow_handler_t handler)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd)
+		return -ENOMEM;
 
 	irq_set_chip_and_handler(irq, chip, handler);
 
-	cd.l = 0;
-	cd.s.line = line;
-	cd.s.bit = bit;
-	cd.s.gpio_line = gpio_line;
+	cd->line = line;
+	cd->bit = bit;
+	cd->gpio_line = gpio_line;
 
-	irq_set_chip_data(irq, cd.p);
+	irq_set_chip_data(irq, cd);
 	octeon_irq_ciu_to_irq[line][bit] = irq;
+	return 0;
 }
 
-static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
-					 int irq, int line, int bit)
+static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
 {
-	irq_domain_associate(domain, irq, line << 6 | bit);
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+	irq_set_chip_data(irq, NULL);
+	kfree(cd);
+}
+
+static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+					int irq, int line, int bit)
+{
+	return irq_domain_associate(domain, irq, line << 6 | bit);
 }
 
 static int octeon_coreid_for_cpu(int cpu)
@@ -202,9 +226,10 @@
 #ifdef CONFIG_SMP
 	int cpu;
 	int weight = cpumask_weight(data->affinity);
+	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
 	if (weight > 1) {
-		cpu = smp_processor_id();
+		cpu = cd->current_cpu;
 		for (;;) {
 			cpu = cpumask_next(cpu, data->affinity);
 			if (cpu >= nr_cpu_ids) {
@@ -219,6 +244,7 @@
 	} else {
 		cpu = smp_processor_id();
 	}
+	cd->current_cpu = cpu;
 	return cpu;
 #else
 	return smp_processor_id();
@@ -231,15 +257,15 @@
 	int coreid = octeon_coreid_for_cpu(cpu);
 	unsigned long *pen;
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -248,7 +274,7 @@
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 	} else {
 		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -263,15 +289,15 @@
 {
 	unsigned long *pen;
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -280,7 +306,7 @@
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
 	} else {
 		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -295,15 +321,15 @@
 {
 	unsigned long *pen;
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-		__clear_bit(cd.s.bit, pen);
+		__clear_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -312,7 +338,7 @@
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
 	} else {
 		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-		__clear_bit(cd.s.bit, pen);
+		__clear_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -328,27 +354,27 @@
 	unsigned long flags;
 	unsigned long *pen;
 	int cpu;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock;
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
 		else
 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
 		raw_spin_lock_irqsave(lock, flags);
-		__clear_bit(cd.s.bit, pen);
+		__clear_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
 		 */
 		wmb();
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 		else
 			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -361,27 +387,27 @@
 	unsigned long flags;
 	unsigned long *pen;
 	int cpu;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock;
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
 		else
 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
 		raw_spin_lock_irqsave(lock, flags);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
 		 */
 		wmb();
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 		else
 			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -397,45 +423,106 @@
 {
 	u64 mask;
 	int cpu = next_cpu_for_irq(data);
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
 	/*
 	 * Called under the desc lock, so these should never get out
 	 * of sync.
 	 */
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = octeon_coreid_for_cpu(cpu) * 2;
-		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 	} else {
 		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
 		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 	}
 }
 
 /*
+ * Enable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+}
+
+/*
+ * Disable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+}
+
+static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
+}
+
+static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
+{
+	int cpu;
+	struct octeon_ciu_chip_data *cd;
+	u64 mask;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	for_each_online_cpu(cpu) {
+		int coreid = octeon_coreid_for_cpu(cpu);
+
+		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
+	}
+}
+
+/*
  * Enable the irq on the current CPU for chips that
  * have the EN*_W1{S,C} registers.
  */
 static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 {
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = cvmx_get_core_num() * 2;
-		set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 	} else {
 		int index = cvmx_get_core_num() * 2 + 1;
-		set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 	}
 }
@@ -443,18 +530,18 @@
 static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 {
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = cvmx_get_core_num() * 2;
-		clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
 	} else {
 		int index = cvmx_get_core_num() * 2 + 1;
-		clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
 	}
 }
@@ -465,12 +552,12 @@
 static void octeon_irq_ciu_ack(struct irq_data *data)
 {
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = cvmx_get_core_num() * 2;
 		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
 	} else {
@@ -486,21 +573,23 @@
 {
 	int cpu;
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2;
-			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+			clear_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
 		}
 	} else {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+			clear_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
 		}
 	}
@@ -514,21 +603,23 @@
 {
 	int cpu;
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2;
-			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+			set_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 		}
 	} else {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+			set_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 		}
 	}
@@ -537,10 +628,10 @@
 static void octeon_irq_gpio_setup(struct irq_data *data)
 {
 	union cvmx_gpio_bit_cfgx cfg;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	u32 t = irqd_get_trigger_type(data);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	cfg.u64 = 0;
 	cfg.s.int_en = 1;
@@ -551,7 +642,7 @@
 	cfg.s.fil_cnt = 7;
 	cfg.s.fil_sel = 3;
 
-	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
 }
 
 static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
@@ -576,36 +667,36 @@
 
 static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+	cd = irq_data_get_irq_chip_data(data);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
 	octeon_irq_ciu_disable_all_v2(data);
 }
 
 static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+	cd = irq_data_get_irq_chip_data(data);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
 	octeon_irq_ciu_disable_all(data);
 }
 
 static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	u64 mask;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.gpio_line);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->gpio_line);
 
 	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
 }
 
-static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
+static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
 {
 	if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH)
 		handle_edge_irq(irq, desc);
@@ -644,11 +735,11 @@
 	int cpu;
 	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	unsigned long *pen;
 	raw_spinlock_t *lock;
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	/*
 	 * For non-v2 CIU, we will allow only single CPU affinity.
@@ -668,16 +759,16 @@
 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
 		raw_spin_lock_irqsave(lock, flags);
 
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
 		else
 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
 		if (cpumask_test_cpu(cpu, dest) && enable_one) {
 			enable_one = 0;
-			__set_bit(cd.s.bit, pen);
+			__set_bit(cd->bit, pen);
 		} else {
-			__clear_bit(cd.s.bit, pen);
+			__clear_bit(cd->bit, pen);
 		}
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
@@ -685,7 +776,7 @@
 		 */
 		wmb();
 
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 		else
 			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -706,24 +797,24 @@
 	int cpu;
 	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
 	if (!enable_one)
 		return 0;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << cd.s.bit;
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << cd->bit;
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		for_each_online_cpu(cpu) {
 			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
 			int index = octeon_coreid_for_cpu(cpu) * 2;
 			if (cpumask_test_cpu(cpu, dest) && enable_one) {
 				enable_one = false;
-				set_bit(cd.s.bit, pen);
+				set_bit(cd->bit, pen);
 				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 			} else {
-				clear_bit(cd.s.bit, pen);
+				clear_bit(cd->bit, pen);
 				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
 			}
 		}
@@ -733,16 +824,44 @@
 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
 			if (cpumask_test_cpu(cpu, dest) && enable_one) {
 				enable_one = false;
-				set_bit(cd.s.bit, pen);
+				set_bit(cd->bit, pen);
 				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 			} else {
-				clear_bit(cd.s.bit, pen);
+				clear_bit(cd->bit, pen);
 				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
 			}
 		}
 	}
 	return 0;
 }
+
+static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
+					    const struct cpumask *dest,
+					    bool force)
+{
+	int cpu;
+	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+	u64 mask;
+	struct octeon_ciu_chip_data *cd;
+
+	if (!enable_one)
+		return 0;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << cd->bit;
+
+	for_each_online_cpu(cpu) {
+		int index = octeon_coreid_for_cpu(cpu);
+
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = false;
+			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+		} else {
+			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+		}
+	}
+	return 0;
+}
 #endif
 
 /*
@@ -752,6 +871,18 @@
 	.name = "CIU",
 	.irq_enable = octeon_irq_ciu_enable_v2,
 	.irq_disable = octeon_irq_ciu_disable_all_v2,
+	.irq_mask = octeon_irq_ciu_disable_local_v2,
+	.irq_unmask = octeon_irq_ciu_enable_v2,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable_v2,
+	.irq_disable = octeon_irq_ciu_disable_all_v2,
 	.irq_ack = octeon_irq_ciu_ack,
 	.irq_mask = octeon_irq_ciu_disable_local_v2,
 	.irq_unmask = octeon_irq_ciu_enable_v2,
@@ -761,10 +892,50 @@
 #endif
 };
 
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu_sum2 = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable_sum2,
+	.irq_disable = octeon_irq_ciu_disable_all_sum2,
+	.irq_mask = octeon_irq_ciu_disable_local_sum2,
+	.irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable_sum2,
+	.irq_disable = octeon_irq_ciu_disable_all_sum2,
+	.irq_ack = octeon_irq_ciu_ack_sum2,
+	.irq_mask = octeon_irq_ciu_disable_local_sum2,
+	.irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
 static struct irq_chip octeon_irq_chip_ciu = {
 	.name = "CIU",
 	.irq_enable = octeon_irq_ciu_enable,
 	.irq_disable = octeon_irq_ciu_disable_all,
+	.irq_mask = octeon_irq_ciu_disable_local,
+	.irq_unmask = octeon_irq_ciu_enable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_edge = {
+	.name = "CIU",
+	.irq_enable = octeon_irq_ciu_enable,
+	.irq_disable = octeon_irq_ciu_disable_all,
 	.irq_ack = octeon_irq_ciu_ack,
 	.irq_mask = octeon_irq_ciu_disable_local,
 	.irq_unmask = octeon_irq_ciu_enable,
@@ -970,11 +1141,12 @@
 			       unsigned int *out_type)
 {
 	unsigned int ciu, bit;
+	struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
 	ciu = intspec[0];
 	bit = intspec[1];
 
-	if (ciu > 1 || bit > 63)
+	if (ciu >= dd->num_sum || bit > 63)
 		return -EINVAL;
 
 	*out_hwirq = (ciu << 6) | bit;
@@ -984,6 +1156,7 @@
 }
 
 static struct irq_chip *octeon_irq_ciu_chip;
+static struct irq_chip *octeon_irq_ciu_chip_edge;
 static struct irq_chip *octeon_irq_gpio_chip;
 
 static bool octeon_irq_virq_in_range(unsigned int virq)
@@ -999,8 +1172,10 @@
 static int octeon_irq_ciu_map(struct irq_domain *d,
 			      unsigned int virq, irq_hw_number_t hw)
 {
+	int rv;
 	unsigned int line = hw >> 6;
 	unsigned int bit = hw & 63;
+	struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
 	if (!octeon_irq_virq_in_range(virq))
 		return -EINVAL;
@@ -1009,54 +1184,61 @@
 	if (line == 0 && bit >= 16 && bit <32)
 		return 0;
 
-	if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
+	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 
-	if (octeon_irq_ciu_is_edge(line, bit))
-		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-					   octeon_irq_ciu_chip,
-					   handle_edge_irq);
-	else
-		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-					   octeon_irq_ciu_chip,
-					   handle_level_irq);
-
-	return 0;
+	if (line == 2) {
+		if (octeon_irq_ciu_is_edge(line, bit))
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				&octeon_irq_chip_ciu_sum2_edge,
+				handle_edge_irq);
+		else
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				&octeon_irq_chip_ciu_sum2,
+				handle_level_irq);
+	} else {
+		if (octeon_irq_ciu_is_edge(line, bit))
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				octeon_irq_ciu_chip_edge,
+				handle_edge_irq);
+		else
+			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+				octeon_irq_ciu_chip,
+				handle_level_irq);
+	}
+	return rv;
 }
 
-static int octeon_irq_gpio_map_common(struct irq_domain *d,
-				      unsigned int virq, irq_hw_number_t hw,
-				      int line_limit, struct irq_chip *chip)
+static int octeon_irq_gpio_map(struct irq_domain *d,
+			       unsigned int virq, irq_hw_number_t hw)
 {
 	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
 	unsigned int line, bit;
+	int r;
 
 	if (!octeon_irq_virq_in_range(virq))
 		return -EINVAL;
 
 	line = (hw + gpiod->base_hwirq) >> 6;
 	bit = (hw + gpiod->base_hwirq) & 63;
-	if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
+	if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+		octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 
-	octeon_irq_set_ciu_mapping(virq, line, bit, hw,
-				   chip, octeon_irq_handle_gpio);
-	return 0;
-}
-
-static int octeon_irq_gpio_map(struct irq_domain *d,
-			       unsigned int virq, irq_hw_number_t hw)
-{
-	return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);
+	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
+		octeon_irq_gpio_chip, octeon_irq_handle_trigger);
+	return r;
 }
 
 static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
 	.map = octeon_irq_ciu_map,
+	.unmap = octeon_irq_free_cd,
 	.xlate = octeon_irq_ciu_xlat,
 };
 
 static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
 	.map = octeon_irq_gpio_map,
+	.unmap = octeon_irq_free_cd,
 	.xlate = octeon_irq_gpio_xlat,
 };
 
@@ -1095,6 +1277,26 @@
 	}
 }
 
+static void octeon_irq_ip4_ciu(void)
+{
+	int coreid = cvmx_get_core_num();
+	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
+	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
+
+	ciu_sum &= ciu_en;
+	if (likely(ciu_sum)) {
+		int bit = fls64(ciu_sum) - 1;
+		int irq = octeon_irq_ciu_to_irq[2][bit];
+
+		if (likely(irq))
+			do_IRQ(irq);
+		else
+			spurious_interrupt();
+	} else {
+		spurious_interrupt();
+	}
+}
+
 static bool octeon_irq_use_ip4;
 
 static void octeon_irq_local_enable_ip4(void *arg)
@@ -1176,7 +1378,10 @@
 
 	/* Enable the CIU lines */
 	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
-	clear_c0_status(STATUSF_IP4);
+	if (octeon_irq_use_ip4)
+		set_c0_status(STATUSF_IP4);
+	else
+		clear_c0_status(STATUSF_IP4);
 }
 
 static void octeon_irq_setup_secondary_ciu2(void)
@@ -1192,95 +1397,194 @@
 		clear_c0_status(STATUSF_IP4);
 }
 
-static void __init octeon_irq_init_ciu(void)
+static int __init octeon_irq_init_ciu(
+	struct device_node *ciu_node, struct device_node *parent)
 {
-	unsigned int i;
+	unsigned int i, r;
 	struct irq_chip *chip;
+	struct irq_chip *chip_edge;
 	struct irq_chip *chip_mbox;
 	struct irq_chip *chip_wd;
-	struct device_node *gpio_node;
-	struct device_node *ciu_node;
 	struct irq_domain *ciu_domain = NULL;
+	struct octeon_irq_ciu_domain_data *dd;
+
+	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+	if (!dd)
+		return -ENOMEM;
 
 	octeon_irq_init_ciu_percpu();
 	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
 
 	octeon_irq_ip2 = octeon_irq_ip2_ciu;
 	octeon_irq_ip3 = octeon_irq_ip3_ciu;
+	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
+		&& !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		octeon_irq_ip4 =  octeon_irq_ip4_ciu;
+		dd->num_sum = 3;
+		octeon_irq_use_ip4 = true;
+	} else {
+		octeon_irq_ip4 = octeon_irq_ip4_mask;
+		dd->num_sum = 2;
+		octeon_irq_use_ip4 = false;
+	}
 	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
-	    OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
 		chip = &octeon_irq_chip_ciu_v2;
+		chip_edge = &octeon_irq_chip_ciu_v2_edge;
 		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
 		chip_wd = &octeon_irq_chip_ciu_wd_v2;
 		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
 	} else {
 		chip = &octeon_irq_chip_ciu;
+		chip_edge = &octeon_irq_chip_ciu_edge;
 		chip_mbox = &octeon_irq_chip_ciu_mbox;
 		chip_wd = &octeon_irq_chip_ciu_wd;
 		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
 	}
 	octeon_irq_ciu_chip = chip;
-	octeon_irq_ip4 = octeon_irq_ip4_mask;
+	octeon_irq_ciu_chip_edge = chip_edge;
 
 	/* Mips internal */
 	octeon_irq_init_core();
 
-	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
-	if (gpio_node) {
-		struct octeon_irq_gpio_domain_data *gpiod;
-
-		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
-		if (gpiod) {
-			/* gpio domain host_data is the base hwirq number. */
-			gpiod->base_hwirq = 16;
-			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
-			of_node_put(gpio_node);
-		} else
-			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
-	} else
-		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
-
-	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
-	if (ciu_node) {
-		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
-		irq_set_default_host(ciu_domain);
-		of_node_put(ciu_node);
-	} else
-		panic("Cannot find device node for cavium,octeon-3860-ciu.");
+	ciu_domain = irq_domain_add_tree(
+		ciu_node, &octeon_irq_domain_ciu_ops, dd);
+	irq_set_default_host(ciu_domain);
 
 	/* CIU_0 */
-	for (i = 0; i < 16; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+	for (i = 0; i < 16; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+		if (r)
+			goto err;
+	}
 
-	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
-	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+	r = octeon_irq_set_ciu_mapping(
+		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
+	if (r)
+		goto err;
+	r = octeon_irq_set_ciu_mapping(
+		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+	if (r)
+		goto err;
 
-	for (i = 0; i < 4; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
-	for (i = 0; i < 4; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
+		if (r)
+			goto err;
+	}
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+		if (r)
+			goto err;
+	}
 
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
-	for (i = 0; i < 4; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
+	if (r)
+		goto err;
 
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
+	if (r)
+		goto err;
+
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+		if (r)
+			goto err;
+	}
+
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
+	if (r)
+		goto err;
+
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+	if (r)
+		goto err;
 
 	/* CIU_1 */
-	for (i = 0; i < 16; i++)
-		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq);
+	for (i = 0; i < 16; i++) {
+		r = octeon_irq_set_ciu_mapping(
+			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
+			handle_level_irq);
+		if (r)
+			goto err;
+	}
 
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
+	if (r)
+		goto err;
 
 	/* Enable the CIU lines */
 	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
-	clear_c0_status(STATUSF_IP4);
+	if (octeon_irq_use_ip4)
+		set_c0_status(STATUSF_IP4);
+	else
+		clear_c0_status(STATUSF_IP4);
+
+	return 0;
+err:
+	return r;
 }
 
+static int __init octeon_irq_init_gpio(
+	struct device_node *gpio_node, struct device_node *parent)
+{
+	struct octeon_irq_gpio_domain_data *gpiod;
+	u32 interrupt_cells;
+	unsigned int base_hwirq;
+	int r;
+
+	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
+	if (r)
+		return r;
+
+	if (interrupt_cells == 1) {
+		u32 v;
+
+		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
+		if (r) {
+			pr_warn("No \"interrupts\" property.\n");
+			return r;
+		}
+		base_hwirq = v;
+	} else if (interrupt_cells == 2) {
+		u32 v0, v1;
+
+		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
+		if (r) {
+			pr_warn("No \"interrupts\" property.\n");
+			return r;
+		}
+		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
+		if (r) {
+			pr_warn("No \"interrupts\" property.\n");
+			return r;
+		}
+		base_hwirq = (v0 << 6) | v1;
+	} else {
+		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
+			interrupt_cells);
+		return -EINVAL;
+	}
+
+	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
+	if (gpiod) {
+		/* gpio domain host_data is the base hwirq number. */
+		gpiod->base_hwirq = base_hwirq;
+		irq_domain_add_linear(
+			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
+	} else {
+		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
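
The two-cell case above packs the parent CIU line and bit from the "interrupts" specifier into a single base hwirq, matching the (line << 6) | bit encoding the CIU domain uses for its own hwirqs. A minimal illustration of that packing, with made-up specifier values rather than anything read from a real device tree:

	/* Illustration only: mirrors the (v0 << 6) | v1 packing above. */
	static unsigned int gpio_base_hwirq(u32 ciu_line, u32 ciu_bit)
	{
		/* e.g. line 0, bit 16 -> hwirq 16; line 7, bit 0 -> hwirq 448 */
		return (ciu_line << 6) | ciu_bit;
	}
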
 /*
  * Watchdog interrupts are special.  They are associated with a single
  * core, so we hardwire the affinity to that core.
@@ -1290,12 +1594,13 @@
 	u64 mask;
 	u64 en_addr;
 	int coreid = data->irq - OCTEON_IRQ_WDOG0;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+		(0x1000ull * cd->line);
 	cvmx_write_csr(en_addr, mask);
 
 }
@@ -1306,12 +1611,13 @@
 	u64 en_addr;
 	int cpu = next_cpu_for_irq(data);
 	int coreid = octeon_coreid_for_cpu(cpu);
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+		(0x1000ull * cd->line);
 	cvmx_write_csr(en_addr, mask);
 }
 
@@ -1320,12 +1626,13 @@
 	u64 mask;
 	u64 en_addr;
 	int coreid = cvmx_get_core_num();
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+		(0x1000ull * cd->line);
 	cvmx_write_csr(en_addr, mask);
 
 }
@@ -1335,12 +1642,13 @@
 	u64 mask;
 	u64 en_addr;
 	int coreid = cvmx_get_core_num();
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line);
+	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
+		(0x1000ull * cd->line);
 	cvmx_write_csr(en_addr, mask);
 
 }
@@ -1350,12 +1658,12 @@
 	u64 mask;
 	u64 en_addr;
 	int coreid = cvmx_get_core_num();
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line);
+	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
 	cvmx_write_csr(en_addr, mask);
 
 }
@@ -1364,13 +1672,14 @@
 {
 	int cpu;
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
 	for_each_online_cpu(cpu) {
-		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
 		cvmx_write_csr(en_addr, mask);
 	}
 }
@@ -1383,7 +1692,8 @@
 	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
 
 	for_each_online_cpu(cpu) {
-		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu));
+		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
+			octeon_coreid_for_cpu(cpu));
 		cvmx_write_csr(en_addr, mask);
 	}
 }
@@ -1396,7 +1706,8 @@
 	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
 
 	for_each_online_cpu(cpu) {
-		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu));
+		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
+			octeon_coreid_for_cpu(cpu));
 		cvmx_write_csr(en_addr, mask);
 	}
 }
@@ -1430,21 +1741,25 @@
 	int cpu;
 	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
 	if (!enable_one)
 		return 0;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << cd.s.bit;
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << cd->bit;
 
 	for_each_online_cpu(cpu) {
 		u64 en_addr;
 		if (cpumask_test_cpu(cpu, dest) && enable_one) {
 			enable_one = false;
-			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
+				octeon_coreid_for_cpu(cpu)) +
+				(0x1000ull * cd->line);
 		} else {
-			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+				octeon_coreid_for_cpu(cpu)) +
+				(0x1000ull * cd->line);
 		}
 		cvmx_write_csr(en_addr, mask);
 	}
@@ -1461,10 +1776,11 @@
 
 static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
 {
-	union octeon_ciu_chip_data cd;
-	cd.p = irq_data_get_irq_chip_data(data);
+	struct octeon_ciu_chip_data *cd;
 
-	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+	cd = irq_data_get_irq_chip_data(data);
+
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
 	octeon_irq_ciu2_disable_all(data);
 }
@@ -1473,6 +1789,18 @@
 	.name = "CIU2-E",
 	.irq_enable = octeon_irq_ciu2_enable,
 	.irq_disable = octeon_irq_ciu2_disable_all,
+	.irq_mask = octeon_irq_ciu2_disable_local,
+	.irq_unmask = octeon_irq_ciu2_enable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_edge = {
+	.name = "CIU2-E",
+	.irq_enable = octeon_irq_ciu2_enable,
+	.irq_disable = octeon_irq_ciu2_disable_all,
 	.irq_ack = octeon_irq_ciu2_ack,
 	.irq_mask = octeon_irq_ciu2_disable_local,
 	.irq_unmask = octeon_irq_ciu2_enable,
@@ -1582,7 +1910,7 @@
 
 	if (octeon_irq_ciu2_is_edge(line, bit))
 		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-					   &octeon_irq_chip_ciu2,
+					   &octeon_irq_chip_ciu2_edge,
 					   handle_edge_irq);
 	else
 		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
@@ -1591,22 +1919,13 @@
 
 	return 0;
 }
-static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
-				    unsigned int virq, irq_hw_number_t hw)
-{
-	return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);
-}
 
 static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
 	.map = octeon_irq_ciu2_map,
+	.unmap = octeon_irq_free_cd,
 	.xlate = octeon_irq_ciu2_xlat,
 };
 
-static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
-	.map = octeon_irq_ciu2_gpio_map,
-	.xlate = octeon_irq_gpio_xlat,
-};
-
 static void octeon_irq_ciu2(void)
 {
 	int line;
@@ -1674,16 +1993,16 @@
 	return;
 }
 
-static void __init octeon_irq_init_ciu2(void)
+static int __init octeon_irq_init_ciu2(
+	struct device_node *ciu_node, struct device_node *parent)
 {
-	unsigned int i;
-	struct device_node *gpio_node;
-	struct device_node *ciu_node;
+	unsigned int i, r;
 	struct irq_domain *ciu_domain = NULL;
 
 	octeon_irq_init_ciu2_percpu();
 	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
 
+	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
 	octeon_irq_ip2 = octeon_irq_ciu2;
 	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
 	octeon_irq_ip4 = octeon_irq_ip4_mask;
@@ -1691,47 +2010,49 @@
 	/* Mips internal */
 	octeon_irq_init_core();
 
-	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
-	if (gpio_node) {
-		struct octeon_irq_gpio_domain_data *gpiod;
-
-		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
-		if (gpiod) {
-			/* gpio domain host_data is the base hwirq number. */
-			gpiod->base_hwirq = 7 << 6;
-			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod);
-			of_node_put(gpio_node);
-		} else
-			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
-	} else
-		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
-
-	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2");
-	if (ciu_node) {
-		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
-		irq_set_default_host(ciu_domain);
-		of_node_put(ciu_node);
-	} else
-		panic("Cannot find device node for cavium,octeon-6880-ciu2.");
+	ciu_domain = irq_domain_add_tree(
+		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
+	irq_set_default_host(ciu_domain);
 
 	/* CUI2 */
-	for (i = 0; i < 64; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
+	for (i = 0; i < 64; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
+		if (r)
+			goto err;
+	}
 
-	for (i = 0; i < 32; i++)
-		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
-					   &octeon_irq_chip_ciu2_wd, handle_level_irq);
+	for (i = 0; i < 32; i++) {
+		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
+			&octeon_irq_chip_ciu2_wd, handle_level_irq);
+		if (r)
+			goto err;
+	}
 
-	for (i = 0; i < 4; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
+		if (r)
+			goto err;
+	}
 
-	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
+	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
+	if (r)
+		goto err;
 
-	for (i = 0; i < 4; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
+		if (r)
+			goto err;
+	}
 
-	for (i = 0; i < 4; i++)
-		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
+	for (i = 0; i < 4; i++) {
+		r = octeon_irq_force_ciu_mapping(
+			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
+		if (r)
+			goto err;
+	}
 
 	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
 	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
@@ -1741,8 +2062,242 @@
 	/* Enable the CIU lines */
 	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
 	clear_c0_status(STATUSF_IP4);
+	return 0;
+err:
+	return r;
 }
 
+struct octeon_irq_cib_host_data {
+	raw_spinlock_t lock;
+	u64 raw_reg;
+	u64 en_reg;
+	int max_bits;
+};
+
+struct octeon_irq_cib_chip_data {
+	struct octeon_irq_cib_host_data *host_data;
+	int bit;
+};
+
+static void octeon_irq_cib_enable(struct irq_data *data)
+{
+	unsigned long flags;
+	u64 en;
+	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+	struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+	raw_spin_lock_irqsave(&host_data->lock, flags);
+	en = cvmx_read_csr(host_data->en_reg);
+	en |= 1ull << cd->bit;
+	cvmx_write_csr(host_data->en_reg, en);
+	raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static void octeon_irq_cib_disable(struct irq_data *data)
+{
+	unsigned long flags;
+	u64 en;
+	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+	struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+	raw_spin_lock_irqsave(&host_data->lock, flags);
+	en = cvmx_read_csr(host_data->en_reg);
+	en &= ~(1ull << cd->bit);
+	cvmx_write_csr(host_data->en_reg, en);
+	raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
+{
+	irqd_set_trigger_type(data, t);
+	return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip octeon_irq_chip_cib = {
+	.name = "CIB",
+	.irq_enable = octeon_irq_cib_enable,
+	.irq_disable = octeon_irq_cib_disable,
+	.irq_mask = octeon_irq_cib_disable,
+	.irq_unmask = octeon_irq_cib_enable,
+	.irq_set_type = octeon_irq_cib_set_type,
+};
+
+static int octeon_irq_cib_xlat(struct irq_domain *d,
+				   struct device_node *node,
+				   const u32 *intspec,
+				   unsigned int intsize,
+				   unsigned long *out_hwirq,
+				   unsigned int *out_type)
+{
+	unsigned int type = 0;
+
+	if (intsize == 2)
+		type = intspec[1];
+
+	switch (type) {
+	case 0: /* unofficial value, but we might as well let it work. */
+	case 4: /* official value for level triggering. */
+		*out_type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	case 1: /* official value for edge triggering. */
+		*out_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	default: /* Nothing else is acceptable. */
+		return -EINVAL;
+	}
+
+	*out_hwirq = intspec[0];
+
+	return 0;
+}
+
+static int octeon_irq_cib_map(struct irq_domain *d,
+			      unsigned int virq, irq_hw_number_t hw)
+{
+	struct octeon_irq_cib_host_data *host_data = d->host_data;
+	struct octeon_irq_cib_chip_data *cd;
+
+	if (hw >= host_data->max_bits) {
+		pr_err("ERROR: %s mapping %u is to big!\n",
+		       d->of_node->name, (unsigned)hw);
+		return -EINVAL;
+	}
+
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd)
+		return -ENOMEM;
+
+	cd->host_data = host_data;
+	cd->bit = hw;
+
+	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
+				 handle_simple_irq);
+	irq_set_chip_data(virq, cd);
+	return 0;
+}
+
+static struct irq_domain_ops octeon_irq_domain_cib_ops = {
+	.map = octeon_irq_cib_map,
+	.unmap = octeon_irq_free_cd,
+	.xlate = octeon_irq_cib_xlat,
+};
+
+/* Chain to real handler. */
+static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
+{
+	u64 en;
+	u64 raw;
+	u64 bits;
+	int i;
+	int irq;
+	struct irq_domain *cib_domain = data;
+	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
+
+	en = cvmx_read_csr(host_data->en_reg);
+	raw = cvmx_read_csr(host_data->raw_reg);
+
+	bits = en & raw;
+
+	for (i = 0; i < host_data->max_bits; i++) {
+		if ((bits & 1ull << i) == 0)
+			continue;
+		irq = irq_find_mapping(cib_domain, i);
+		if (!irq) {
+			unsigned long flags;
+
+			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
+				i, host_data->raw_reg);
+			raw_spin_lock_irqsave(&host_data->lock, flags);
+			en = cvmx_read_csr(host_data->en_reg);
+			en &= ~(1ull << i);
+			cvmx_write_csr(host_data->en_reg, en);
+			cvmx_write_csr(host_data->raw_reg, 1ull << i);
+			raw_spin_unlock_irqrestore(&host_data->lock, flags);
+		} else {
+			struct irq_desc *desc = irq_to_desc(irq);
+			struct irq_data *irq_data = irq_desc_get_irq_data(desc);
+			/* If edge, acknowledge the bit we will be sending. */
+			if (irqd_get_trigger_type(irq_data) &
+				IRQ_TYPE_EDGE_BOTH)
+				cvmx_write_csr(host_data->raw_reg, 1ull << i);
+			generic_handle_irq_desc(irq, desc);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
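
For context, a leaf driver consumes one of these CIB-routed interrupts like any other virq once the domain has mapped it; an edge-triggered request goes through octeon_irq_cib_set_type() and is acknowledged in the handler above before being dispatched. A sketch of such a consumer (the device, handler and names are hypothetical, not part of this patch):

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t my_cib_isr(int irq, void *dev_id)
	{
		/* device-specific handling would go here */
		return IRQ_HANDLED;
	}

	static int my_cib_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);	/* virq mapped by the CIB domain */

		if (irq < 0)
			return irq;
		return devm_request_irq(&pdev->dev, irq, my_cib_isr,
					IRQF_TRIGGER_RISING, "my-cib-dev", pdev);
	}
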
+
+static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+				      struct device_node *parent)
+{
+	const __be32 *addr;
+	u32 val;
+	struct octeon_irq_cib_host_data *host_data;
+	int parent_irq;
+	int r;
+	struct irq_domain *cib_domain;
+
+	parent_irq = irq_of_parse_and_map(ciu_node, 0);
+	if (!parent_irq) {
+		pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
+			ciu_node->name);
+		return -EINVAL;
+	}
+
+	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+	if (!host_data)
+		return -ENOMEM;
+
+	raw_spin_lock_init(&host_data->lock);
+
+	addr = of_get_address(ciu_node, 0, NULL, NULL);
+	if (!addr) {
+		pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
+		return -EINVAL;
+	}
+	host_data->raw_reg = (u64)phys_to_virt(
+		of_translate_address(ciu_node, addr));
+
+	addr = of_get_address(ciu_node, 1, NULL, NULL);
+	if (!addr) {
+		pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
+		return -EINVAL;
+	}
+	host_data->en_reg = (u64)phys_to_virt(
+		of_translate_address(ciu_node, addr));
+
+	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
+	if (r) {
+		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
+			ciu_node->name);
+		return r;
+	}
+	host_data->max_bits = val;
+
+	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
+					   &octeon_irq_domain_cib_ops,
+					   host_data);
+	if (!cib_domain) {
+		pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
+		return -ENOMEM;
+	}
+
+	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
+	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
+
+	r = request_irq(parent_irq, octeon_irq_cib_handler,
+			IRQF_NO_THREAD, "cib", cib_domain);
+	if (r) {
+		pr_err("request_irq cib failed %d\n", r);
+		return r;
+	}
+	pr_info("CIB interrupt controller probed: %llx %d\n",
+		host_data->raw_reg, host_data->max_bits);
+	return 0;
+}
+
+static struct of_device_id ciu_types[] __initdata = {
+	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
+	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
+	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
+	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
+	{}
+};
+
 void __init arch_init_irq(void)
 {
 #ifdef CONFIG_SMP
@@ -1750,10 +2305,7 @@
 	cpumask_clear(irq_default_affinity);
 	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
 #endif
-	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
-		octeon_irq_init_ciu2();
-	else
-		octeon_irq_init_ciu();
+	of_irq_init(ciu_types);
 }
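
With the hard-coded CN68XX check gone, controller selection is driven entirely by the device tree: of_irq_init() scans for nodes matching ciu_types and invokes each entry's .data callback with the node and its interrupt parent. Conceptually it amounts to the following simplified sketch (which ignores the parent ordering of_irq_init actually performs):

	/* Simplified view of what of_irq_init(ciu_types) does here. */
	struct device_node *np;
	const struct of_device_id *match;

	for_each_matching_node_and_match(np, ciu_types, &match) {
		of_irq_init_cb_t init_cb = (of_irq_init_cb_t)match->data;

		init_cb(np, of_irq_find_parent(np));
	}
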
 
 asmlinkage void plat_irq_dispatch(void)
@@ -1767,13 +2319,13 @@
 		cop0_cause &= cop0_status;
 		cop0_cause &= ST0_IM;
 
-		if (unlikely(cop0_cause & STATUSF_IP2))
+		if (cop0_cause & STATUSF_IP2)
 			octeon_irq_ip2();
-		else if (unlikely(cop0_cause & STATUSF_IP3))
+		else if (cop0_cause & STATUSF_IP3)
 			octeon_irq_ip3();
-		else if (unlikely(cop0_cause & STATUSF_IP4))
+		else if (cop0_cause & STATUSF_IP4)
 			octeon_irq_ip4();
-		else if (likely(cop0_cause))
+		else if (cop0_cause)
 			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
 		else
 			break;
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 94f888d..a42110e 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -41,6 +41,7 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/pci-octeon.h>
 #include <asm/octeon/cvmx-mio-defs.h>
+#include <asm/octeon/cvmx-rst-defs.h>
 
 extern struct plat_smp_ops octeon_smp_ops;
 
@@ -579,12 +580,10 @@
 	/* R/W If set, CVMSEG is available for loads/stores in user
 	 * mode. */
 	cvmmemctl.s.cvmsegenau = 0;
-	/* R/W Size of local memory in cache blocks, 54 (6912 bytes)
-	 * is max legal value. */
-	cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
 
 	write_c0_cvmmemctl(cvmmemctl.u64);
 
+	/* Setup of CVMSEG is done in kernel-entry-init.h */
 	if (smp_processor_id() == 0)
 		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
 			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
@@ -615,6 +614,7 @@
 	const char *arg;
 	char *p;
 	int i;
+	u64 t;
 	int argc;
 #ifdef CONFIG_CAVIUM_RESERVE32
 	int64_t addr = -1;
@@ -654,15 +654,56 @@
 	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
 	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
 
-	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+	if (OCTEON_IS_OCTEON2()) {
 		/* I/O clock runs at a different rate than the CPU. */
 		union cvmx_mio_rst_boot rst_boot;
 		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
 		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
+	} else if (OCTEON_IS_OCTEON3()) {
+		/* I/O clock runs at a different rate than the CPU. */
+		union cvmx_rst_boot rst_boot;
+		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+		octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
 	} else {
 		octeon_io_clock_rate = sysinfo->cpu_clock_hz;
 	}
 
+	t = read_c0_cvmctl();
+	if ((t & (1ull << 27)) == 0) {
+		/*
+		 * Setup the multiplier save/restore code if
+		 * CvmCtl[NOMUL] clear.
+		 */
+		void *save;
+		void *save_end;
+		void *restore;
+		void *restore_end;
+		int save_len;
+		int restore_len;
+		int save_max = (char *)octeon_mult_save_end -
+			(char *)octeon_mult_save;
+		int restore_max = (char *)octeon_mult_restore_end -
+			(char *)octeon_mult_restore;
+		if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
+			save = octeon_mult_save3;
+			save_end = octeon_mult_save3_end;
+			restore = octeon_mult_restore3;
+			restore_end = octeon_mult_restore3_end;
+		} else {
+			save = octeon_mult_save2;
+			save_end = octeon_mult_save2_end;
+			restore = octeon_mult_restore2;
+			restore_end = octeon_mult_restore2_end;
+		}
+		save_len = (char *)save_end - (char *)save;
+		restore_len = (char *)restore_end - (char *)restore;
+		if (!WARN_ON(save_len > save_max ||
+				restore_len > restore_max)) {
+			memcpy(octeon_mult_save, save, save_len);
+			memcpy(octeon_mult_restore, restore, restore_len);
+		}
+	}
+
 	/*
 	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
 	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
@@ -1004,7 +1045,7 @@
 
 void prom_free_prom_memory(void)
 {
-	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
+	if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) {
 		/* Check for presence of Core-14449 fix.  */
 		u32 insn;
 		u32 *foo;
@@ -1026,8 +1067,9 @@
 			panic("No PREF instruction at Core-14449 probe point.");
 
 		if (((insn >> 16) & 0x1f) != 28)
-			panic("Core-14449 WAR not in place (%04x).\n"
-			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn);
+			panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
+			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
+			      insn);
 	}
 }
 
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
new file mode 100644
index 0000000..4bce1f8
--- /dev/null
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -0,0 +1,193 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c
index f9f5307..19f7101 100644
--- a/arch/mips/fw/arc/misc.c
+++ b/arch/mips/fw/arc/misc.c
@@ -9,6 +9,7 @@
  * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
  * Copyright (C) 1999 Silicon Graphics, Inc.
  */
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
@@ -19,50 +20,55 @@
 #include <asm/sgialib.h>
 #include <asm/bootinfo.h>
 
-VOID
+VOID __noreturn
 ArcHalt(VOID)
 {
 	bc_disable();
 	local_irq_disable();
 	ARC_CALL0(halt);
-never:	goto never;
+
+	unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcPowerDown(VOID)
 {
 	bc_disable();
 	local_irq_disable();
 	ARC_CALL0(pdown);
-never:	goto never;
+
+	unreachable();
 }
 
 /* XXX is this a soft reset basically? XXX */
-VOID
+VOID __noreturn
 ArcRestart(VOID)
 {
 	bc_disable();
 	local_irq_disable();
 	ARC_CALL0(restart);
-never:	goto never;
+
+	unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcReboot(VOID)
 {
 	bc_disable();
 	local_irq_disable();
 	ARC_CALL0(reboot);
-never:	goto never;
+
+	unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcEnterInteractiveMode(VOID)
 {
 	bc_disable();
 	local_irq_disable();
 	ARC_CALL0(imode);
-never:	goto never;
+
+	unreachable();
 }
 
 LONG
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 200efea..526539c 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,4 +1,5 @@
 # MIPS headers
+generic-$(CONFIG_GENERIC_CSUM) += checksum.h
 generic-y += cputime.h
 generic-y += current.h
 generic-y += dma-contiguous.h
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 6caf876..0cae459 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,7 +19,7 @@
 #include <asm/asmmacro-64.h>
 #endif
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	.macro	local_irq_enable reg=t0
 	ei
 	irq_enable_hazard
@@ -104,7 +104,8 @@
 	.endm
 
 	.macro	fpu_save_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	sll	\tmp, \status, 5
 	bgez	\tmp, 10f
 	fpu_save_16odd \thread
@@ -160,7 +161,8 @@
 	.endm
 
 	.macro	fpu_restore_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	sll	\tmp, \status, 5
 	bgez	\tmp, 10f				# 16 register mode?
 
@@ -170,16 +172,16 @@
 	fpu_restore_16even \thread \tmp
 	.endm
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	.macro	_EXT	rd, rs, p, s
 	ext	\rd, \rs, \p, \s
 	.endm
-#else /* !CONFIG_CPU_MIPSR2 */
+#else /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
 	.macro	_EXT	rd, rs, p, s
 	srl	\rd, \rs, \p
 	andi	\rd, \rd, (1 << \s) - 1
 	.endm
-#endif /* !CONFIG_CPU_MIPSR2 */
+#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
 
 /*
  * Temporary until all gas have MT ASE support
@@ -304,7 +306,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	add	$1, \base, \off
+	addu	$1, \base, \off
 	.word	LDD_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -313,7 +315,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	add	$1, \base, \off
+	addu	$1, \base, \off
 	.word	STD_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 857da84..26d4363 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -54,19 +54,19 @@
 		"	sc	%0, %1					\n"   \
 		"	beqzl	%0, 1b					\n"   \
 		"	.set	mips0					\n"   \
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)	      \
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
 		: "Ir" (i));						      \
 	} else if (kernel_uses_llsc) {					      \
 		int temp;						      \
 									      \
 		do {							      \
 			__asm__ __volatile__(				      \
-			"	.set	arch=r4000			\n"   \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
 			"	ll	%0, %1		# atomic_" #op "\n"   \
 			"	" #asm_op " %0, %2			\n"   \
 			"	sc	%0, %1				\n"   \
 			"	.set	mips0				\n"   \
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
 			: "Ir" (i));					      \
 		} while (unlikely(!temp));				      \
 	} else {							      \
@@ -97,20 +97,20 @@
 		"	" #asm_op " %0, %1, %3				\n"   \
 		"	.set	mips0					\n"   \
 		: "=&r" (result), "=&r" (temp),				      \
-		  "+" GCC_OFF12_ASM() (v->counter)			      \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
 		: "Ir" (i));						      \
 	} else if (kernel_uses_llsc) {					      \
 		int temp;						      \
 									      \
 		do {							      \
 			__asm__ __volatile__(				      \
-			"	.set	arch=r4000			\n"   \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
 			"	ll	%1, %2	# atomic_" #op "_return	\n"   \
 			"	" #asm_op " %0, %1, %3			\n"   \
 			"	sc	%0, %2				\n"   \
 			"	.set	mips0				\n"   \
 			: "=&r" (result), "=&r" (temp),			      \
-			  "+" GCC_OFF12_ASM() (v->counter)		      \
+			  "+" GCC_OFF_SMALL_ASM() (v->counter)		      \
 			: "Ir" (i));					      \
 		} while (unlikely(!result));				      \
 									      \
@@ -171,14 +171,14 @@
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF12_ASM() (v->counter)
-		: "Ir" (i), GCC_OFF12_ASM() (v->counter)
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -190,7 +190,7 @@
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF12_ASM() (v->counter)
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "Ir" (i));
 	} else {
 		unsigned long flags;
@@ -333,19 +333,19 @@
 		"	scd	%0, %1					\n"   \
 		"	beqzl	%0, 1b					\n"   \
 		"	.set	mips0					\n"   \
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)	      \
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
 		: "Ir" (i));						      \
 	} else if (kernel_uses_llsc) {					      \
 		long temp;						      \
 									      \
 		do {							      \
 			__asm__ __volatile__(				      \
-			"	.set	arch=r4000			\n"   \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
 			"	lld	%0, %1		# atomic64_" #op "\n" \
 			"	" #asm_op " %0, %2			\n"   \
 			"	scd	%0, %1				\n"   \
 			"	.set	mips0				\n"   \
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
 			: "Ir" (i));					      \
 		} while (unlikely(!temp));				      \
 	} else {							      \
@@ -376,21 +376,21 @@
 		"	" #asm_op " %0, %1, %3				\n"   \
 		"	.set	mips0					\n"   \
 		: "=&r" (result), "=&r" (temp),				      \
-		  "+" GCC_OFF12_ASM() (v->counter)			      \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
 		: "Ir" (i));						      \
 	} else if (kernel_uses_llsc) {					      \
 		long temp;						      \
 									      \
 		do {							      \
 			__asm__ __volatile__(				      \
-			"	.set	arch=r4000			\n"   \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
 			"	lld	%1, %2	# atomic64_" #op "_return\n"  \
 			"	" #asm_op " %0, %1, %3			\n"   \
 			"	scd	%0, %2				\n"   \
 			"	.set	mips0				\n"   \
 			: "=&r" (result), "=&r" (temp),			      \
-			  "=" GCC_OFF12_ASM() (v->counter)		      \
-			: "Ir" (i), GCC_OFF12_ASM() (v->counter)	      \
+			  "=" GCC_OFF_SMALL_ASM() (v->counter)		      \
+			: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)	      \
 			: "memory");					      \
 		} while (unlikely(!result));				      \
 									      \
@@ -452,14 +452,14 @@
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp),
-		  "=" GCC_OFF12_ASM() (v->counter)
-		: "Ir" (i), GCC_OFF12_ASM() (v->counter)
+		  "=" GCC_OFF_SMALL_ASM() (v->counter)
+		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -471,7 +471,7 @@
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF12_ASM() (v->counter)
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "Ir" (i));
 	} else {
 		unsigned long flags;
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 6663bcc..9f935f6 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -79,28 +79,28 @@
 		"	" __SC	"%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
-		: "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
-#ifdef CONFIG_CPU_MIPSR2
+		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
+		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 		do {
 			__asm__ __volatile__(
 			"	" __LL "%0, %1		# set_bit	\n"
 			"	" __INS "%0, %3, %2, 1			\n"
 			"	" __SC "%0, %1				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (bit), "r" (~0));
 		} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1		# set_bit	\n"
 			"	or	%0, %2				\n"
 			"	" __SC	"%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
 	} else
@@ -131,28 +131,28 @@
 		"	" __SC "%0, %1					\n"
 		"	beqzl	%0, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 		: "ir" (~(1UL << bit)));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 		do {
 			__asm__ __volatile__(
 			"	" __LL "%0, %1		# clear_bit	\n"
 			"	" __INS "%0, $0, %2, 1			\n"
 			"	" __SC "%0, %1				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (bit));
 		} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1		# clear_bit	\n"
 			"	and	%0, %2				\n"
 			"	" __SC "%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (~(1UL << bit)));
 		} while (unlikely(!temp));
 	} else
@@ -197,7 +197,7 @@
 		"	" __SC	"%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 		: "ir" (1UL << bit));
 	} else if (kernel_uses_llsc) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -205,12 +205,12 @@
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1		# change_bit	\n"
 			"	xor	%0, %2				\n"
 			"	" __SC	"%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
 	} else
@@ -245,7 +245,7 @@
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 		: "r" (1UL << bit)
 		: "memory");
 	} else if (kernel_uses_llsc) {
@@ -254,12 +254,12 @@
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	" __SC	"%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -308,12 +308,12 @@
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	" __SC	"%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -355,10 +355,10 @@
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 		: "r" (1UL << bit)
 		: "memory");
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 		unsigned long temp;
@@ -369,7 +369,7 @@
 			"	" __EXT "%2, %0, %3, 1			\n"
 			"	" __INS "%0, $0, %3, 1			\n"
 			"	" __SC	"%0, %1				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "ir" (bit)
 			: "memory");
 		} while (unlikely(!temp));
@@ -380,13 +380,13 @@
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	xor	%2, %3				\n"
 			"	" __SC	"%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -428,7 +428,7 @@
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 		: "r" (1UL << bit)
 		: "memory");
 	} else if (kernel_uses_llsc) {
@@ -437,12 +437,12 @@
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL	"%0, %1 # test_and_change_bit	\n"
 			"	xor	%2, %0, %3			\n"
 			"	" __SC	"\t%2, %1			\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -485,7 +485,7 @@
 	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
 		__asm__(
 		"	.set	push					\n"
-		"	.set	mips32					\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"	clz	%0, %1					\n"
 		"	.set	pop					\n"
 		: "=r" (num)
@@ -498,7 +498,7 @@
 	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
 		__asm__(
 		"	.set	push					\n"
-		"	.set	mips64					\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"	dclz	%0, %1					\n"
 		"	.set	pop					\n"
 		: "=r" (num)
@@ -562,7 +562,7 @@
 	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
 		__asm__(
 		"	.set	push					\n"
-		"	.set	mips32					\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"	clz	%0, %1					\n"
 		"	.set	pop					\n"
 		: "=r" (x)
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 3418c51..5c585c5 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -12,6 +12,10 @@
 #ifndef _ASM_CHECKSUM_H
 #define _ASM_CHECKSUM_H
 
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
 #include <linux/in6.h>
 
 #include <asm/uaccess.h>
@@ -99,27 +103,23 @@
  */
 __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 				       int len, __wsum sum);
+#define csum_partial_copy_nocheck csum_partial_copy_nocheck
 
 /*
  *	Fold a partial checksum without adding pseudo headers
  */
-static inline __sum16 csum_fold(__wsum sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
-	__asm__(
-	"	.set	push		# csum_fold\n"
-	"	.set	noat		\n"
-	"	sll	$1, %0, 16	\n"
-	"	addu	%0, $1		\n"
-	"	sltu	$1, %0, $1	\n"
-	"	srl	%0, %0, 16	\n"
-	"	addu	%0, $1		\n"
-	"	xori	%0, 0xffff	\n"
-	"	.set	pop"
-	: "=r" (sum)
-	: "0" (sum));
+	u32 sum = (__force u32)csum;
 
-	return (__force __sum16)sum;
+	sum += (sum << 16);
+	csum = (sum < csum);
+	sum >>= 16;
+	sum += csum;
+
+	return (__force __sum16)~sum;
 }
+#define csum_fold csum_fold
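
The assembly version is replaced by portable C: the high 16 bits are added into the low 16 bits, the carry out of the 32-bit add is captured by the unsigned compare and folded back in, and the result is complemented. A small standalone check with a made-up partial sum (illustrative only, not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	static uint16_t fold(uint32_t csum)
	{
		uint32_t sum = csum;

		sum += sum << 16;	/* add the two 16-bit halves */
		csum = (sum < csum);	/* carry out of the 32-bit add */
		sum >>= 16;
		sum += csum;		/* fold the carry back in */
		return (uint16_t)~sum;
	}

	int main(void)
	{
		/* 0x1234 + 0xf00d = 0x10241, fold the carry -> 0x0242, ~ -> 0xfdbd */
		assert(fold(0x1234f00d) == 0xfdbd);
		return 0;
	}
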
 
 /*
  *	This is a version of ip_compute_csum() optimized for IP headers,
@@ -158,6 +158,7 @@
 
 	return csum_fold(csum);
 }
+#define ip_fast_csum ip_fast_csum
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr,
 	__be32 daddr, unsigned short len, unsigned short proto,
@@ -200,18 +201,7 @@
 
 	return sum;
 }
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   __wsum sum)
-{
-	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
-}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
@@ -287,4 +277,7 @@
 	return csum_fold(sum);
 }
 
+#include <asm-generic/checksum.h>
+#endif /* CONFIG_GENERIC_CSUM */
+
 #endif /* _ASM_CHECKSUM_H */
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 28b1edf..d0a2a68 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -31,24 +31,24 @@
 		"	sc	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-		: GCC_OFF12_ASM() (*m), "Jr" (val)
+		: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		unsigned long dummy;
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	ll	%0, %3		# xchg_u32	\n"
 			"	.set	mips0				\n"
 			"	move	%2, %z4				\n"
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	sc	%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+			: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
 			  "=&r" (dummy)
-			: GCC_OFF12_ASM() (*m), "Jr" (val)
+			: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 			: "memory");
 		} while (unlikely(!dummy));
 	} else {
@@ -82,22 +82,22 @@
 		"	scd	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-		: GCC_OFF12_ASM() (*m), "Jr" (val)
+		: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		unsigned long dummy;
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	lld	%0, %3		# xchg_u64	\n"
 			"	move	%2, %z4				\n"
 			"	scd	%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+			: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
 			  "=&r" (dummy)
-			: GCC_OFF12_ASM() (*m), "Jr" (val)
+			: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 			: "memory");
 		} while (unlikely(!dummy));
 	} else {
@@ -158,25 +158,25 @@
 		"	beqzl	$1, 1b				\n"	\
 		"2:						\n"	\
 		"	.set	pop				\n"	\
-		: "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)		\
-		: GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)		\
+		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)		\
 		: "memory");						\
 	} else if (kernel_uses_llsc) {					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
-		"	.set	arch=r4000			\n"	\
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"1:	" ld "	%0, %2		# __cmpxchg_asm \n"	\
 		"	bne	%0, %z3, 2f			\n"	\
 		"	.set	mips0				\n"	\
 		"	move	$1, %z4				\n"	\
-		"	.set	arch=r4000			\n"	\
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"	" st "	$1, %1				\n"	\
 		"	beqz	$1, 1b				\n"	\
 		"	.set	pop				\n"	\
 		"2:						\n"	\
-		: "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)		\
-		: GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)		\
+		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)		\
 		: "memory");						\
 	} else {							\
 		unsigned long __flags;					\
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index c73815e..e081a26 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -16,12 +16,30 @@
 #define GCC_REG_ACCUM "accum"
 #endif
 
+#ifdef CONFIG_CPU_MIPSR6
+/* All MIPS R6 toolchains support the ZC constraint */
+#define GCC_OFF_SMALL_ASM() "ZC"
+#else
 #ifndef CONFIG_CPU_MICROMIPS
-#define GCC_OFF12_ASM() "R"
+#define GCC_OFF_SMALL_ASM() "R"
 #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
-#define GCC_OFF12_ASM() "ZC"
+#define GCC_OFF_SMALL_ASM() "ZC"
 #else
 #error "microMIPS compilation unsupported with GCC older than 4.9"
-#endif
+#endif /* CONFIG_CPU_MICROMIPS */
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#ifdef CONFIG_CPU_MIPSR6
+#define MIPS_ISA_LEVEL "mips64r6"
+#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
+#define MIPS_ISA_LEVEL_RAW mips64r6
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#else
+/* MIPS64 is a superset of MIPS32 */
+#define MIPS_ISA_LEVEL "mips64r2"
+#define MIPS_ISA_ARCH_LEVEL "arch=r4000"
+#define MIPS_ISA_LEVEL_RAW mips64r2
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #endif /* _ASM_COMPILER_H */
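
These macros are consumed by string concatenation inside the asm templates updated earlier in this series: on an R6 build ".set " MIPS_ISA_LEVEL pastes into ".set mips64r6" and "+" GCC_OFF_SMALL_ASM() yields the "+ZC" constraint, while pre-R6 builds keep "mips64r2"/"arch=r4000" and "+R". A hypothetical helper showing the same splicing (a sketch under those assumptions, not code from this patch):

	/* Sketch only: LL/SC increment written against the new macros. */
	static inline void sketch_atomic_inc(int *p)
	{
		int temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_LEVEL"		\n"
			"	ll	%0, %1				\n"
			"	addiu	%0, 1				\n"
			"	sc	%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*p));
		} while (!temp);
	}
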
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 2897cfa..0d8208d 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -38,6 +38,9 @@
 #ifndef cpu_has_maar
 #define cpu_has_maar		(cpu_data[0].options & MIPS_CPU_MAAR)
 #endif
+#ifndef cpu_has_rw_llb
+#define cpu_has_rw_llb		(cpu_data[0].options & MIPS_CPU_RW_LLB)
+#endif
 
 /*
  * For the moment we don't consider R6000 and R8000 so we can assume that
@@ -171,6 +174,9 @@
 #endif
 #endif
 
+#ifndef cpu_has_mips_1
+# define cpu_has_mips_1		(!cpu_has_mips_r6)
+#endif
 #ifndef cpu_has_mips_2
 # define cpu_has_mips_2		(cpu_data[0].isa_level & MIPS_CPU_ISA_II)
 #endif
@@ -189,12 +195,18 @@
 #ifndef cpu_has_mips32r2
 # define cpu_has_mips32r2	(cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
 #endif
+#ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6	(cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+#endif
 #ifndef cpu_has_mips64r1
 # define cpu_has_mips64r1	(cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
 #endif
 #ifndef cpu_has_mips64r2
 # define cpu_has_mips64r2	(cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
 #endif
+#ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6	(cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+#endif
 
 /*
  * Shortcuts ...
@@ -208,17 +220,23 @@
 #define cpu_has_mips_4_5_r	(cpu_has_mips_4 | cpu_has_mips_5_r)
 #define cpu_has_mips_5_r	(cpu_has_mips_5 | cpu_has_mips_r)
 
-#define cpu_has_mips_4_5_r2	(cpu_has_mips_4_5 | cpu_has_mips_r2)
+#define cpu_has_mips_4_5_r2_r6	(cpu_has_mips_4_5 | cpu_has_mips_r2 | \
+				 cpu_has_mips_r6)
 
-#define cpu_has_mips32	(cpu_has_mips32r1 | cpu_has_mips32r2)
-#define cpu_has_mips64	(cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips32	(cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
+#define cpu_has_mips64	(cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
 #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r6	(cpu_has_mips32r6 | cpu_has_mips64r6)
 #define cpu_has_mips_r	(cpu_has_mips32r1 | cpu_has_mips32r2 | \
-			 cpu_has_mips64r1 | cpu_has_mips64r2)
+			 cpu_has_mips32r6 | cpu_has_mips64r1 | \
+			 cpu_has_mips64r2 | cpu_has_mips64r6)
+
+/* MIPSR2 and MIPSR6 have a lot of similarities */
+#define cpu_has_mips_r2_r6	(cpu_has_mips_r2 | cpu_has_mips_r6)
 
 #ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
+#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
 #endif
 
 /*
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index a6c9ccb..c3f4f2d 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -84,6 +84,11 @@
 	 * (shifted by _CACHE_SHIFT)
 	 */
 	unsigned int		writecombine;
+	/*
+	 * Simple counter to prevent enabling HTW in nested
+	 * htw_start/htw_stop calls
+	 */
+	unsigned int		htw_seq;
 } __attribute__((aligned(SMP_CACHE_BYTES)));
 
 extern struct cpuinfo_mips cpu_data[];
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index b4e2bd8..8245875 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -54,6 +54,13 @@
 	case CPU_M5150:
 #endif
 
+#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R6)
+	case CPU_QEMU_GENERIC:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
 	case CPU_5KC:
 	case CPU_5KE:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 33866fc..1568723 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -93,6 +93,7 @@
  * These are the PRID's for when 23:16 == PRID_COMP_MIPS
  */
 
+#define PRID_IMP_QEMU_GENERIC	0x0000
 #define PRID_IMP_4KC		0x8000
 #define PRID_IMP_5KC		0x8100
 #define PRID_IMP_20KC		0x8200
@@ -312,6 +313,8 @@
 	CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
 	CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
 
+	CPU_QEMU_GENERIC,
+
 	CPU_LAST
 };
 
@@ -329,11 +332,14 @@
 #define MIPS_CPU_ISA_M32R2	0x00000020
 #define MIPS_CPU_ISA_M64R1	0x00000040
 #define MIPS_CPU_ISA_M64R2	0x00000080
+#define MIPS_CPU_ISA_M32R6	0x00000100
+#define MIPS_CPU_ISA_M64R6	0x00000200
 
 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \
-	MIPS_CPU_ISA_M32R2)
+	MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6)
 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
-	MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
+	MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+	MIPS_CPU_ISA_M64R6)
 
 /*
  * CPU Option encodings
@@ -370,6 +376,7 @@
 #define MIPS_CPU_RIXIEX		0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
 #define MIPS_CPU_MAAR		0x400000000ull /* MAAR(I) registers are present */
 #define MIPS_CPU_FRE		0x800000000ull /* FRE & UFE bits implemented */
+#define MIPS_CPU_RW_LLB		0x1000000000ull /* LLADDR/LLB writes are allowed */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
index ae6fedc..94105d3 100644
--- a/arch/mips/include/asm/edac.h
+++ b/arch/mips/include/asm/edac.h
@@ -26,8 +26,8 @@
 		"	sc	%0, %1					\n"
 		"	beqz	%0, 1b					\n"
 		"	.set	mips0					\n"
-		: "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
-		: GCC_OFF12_ASM() (*virt_addr));
+		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
+		: GCC_OFF_SMALL_ASM() (*virt_addr));
 
 		virt_addr++;
 	}
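
GCC_OFF12_ASM() becomes GCC_OFF_SMALL_ASM() here and in the other inline-asm users below, because the offset range a memory operand may carry is no longer a fixed 12 bits but depends on the ISA; the R6 LL/SC encodings in particular accept only a small offset. A sketch of what the helper in <asm/compiler.h> is assumed to look like (not part of this diff):

/*
 * Assumed shape only: choose an inline-asm memory constraint whose
 * offset range is legal for the target ISA's LL/SC instructions.
 */
#ifndef CONFIG_CPU_MIPSR6
#define GCC_OFF_SMALL_ASM() "R"
#else
#define GCC_OFF_SMALL_ASM() "ZC"
#endif
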
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index eb4d95d..535f196 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -417,13 +417,15 @@
 struct arch_elf_state {
 	int fp_abi;
 	int interp_fp_abi;
-	int overall_abi;
+	int overall_fp_mode;
 };
 
+#define MIPS_ABI_FP_UNKNOWN	(-1)	/* Unknown FP ABI (kernel internal) */
+
 #define INIT_ARCH_ELF_STATE {			\
-	.fp_abi = -1,				\
-	.interp_fp_abi = -1,			\
-	.overall_abi = -1,			\
+	.fp_abi = MIPS_ABI_FP_UNKNOWN,		\
+	.interp_fp_abi = MIPS_ABI_FP_UNKNOWN,	\
+	.overall_fp_mode = -1,			\
 }
 
 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
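
Renaming overall_abi to overall_fp_mode and introducing MIPS_ABI_FP_UNKNOWN makes the "nothing recorded yet" state explicit instead of a bare -1. A trivial illustrative check against the definitions above (the function is hypothetical):

#include <asm/elf.h>

/* Illustrative only: a freshly initialised state has no FP ABI recorded. */
static int example_fp_abi_recorded(const struct arch_elf_state *state)
{
	return state->fp_abi != MIPS_ABI_FP_UNKNOWN ||
	       state->interp_fp_abi != MIPS_ABI_FP_UNKNOWN;
}
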
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index affebb7..dd083e9 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -68,7 +68,8 @@
 		goto fr_common;
 
 	case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+      || defined(CONFIG_64BIT))
 		/* we only have a 32-bit FPU */
 		return SIGFPE;
 #endif
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index ef9987a..1de190b 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -45,19 +45,19 @@
 		"	"__UA_ADDR "\t2b, 4b			\n"	\
 		"	.previous				\n"	\
 		: "=r" (ret), "=&r" (oldval),				\
-		  "=" GCC_OFF12_ASM() (*uaddr)				\
-		: "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg),	\
+		  "=" GCC_OFF_SMALL_ASM() (*uaddr)				\
+		: "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),	\
 		  "i" (-EFAULT)						\
 		: "memory");						\
 	} else if (cpu_has_llsc) {					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
-		"	.set	arch=r4000			\n"	\
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"1:	"user_ll("%1", "%4")" # __futex_atomic_op\n"	\
 		"	.set	mips0				\n"	\
 		"	" insn	"				\n"	\
-		"	.set	arch=r4000			\n"	\
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"2:	"user_sc("$1", "%2")"			\n"	\
 		"	beqz	$1, 1b				\n"	\
 		__WEAK_LLSC_MB						\
@@ -74,8 +74,8 @@
 		"	"__UA_ADDR "\t2b, 4b			\n"	\
 		"	.previous				\n"	\
 		: "=r" (ret), "=&r" (oldval),				\
-		  "=" GCC_OFF12_ASM() (*uaddr)				\
-		: "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg),	\
+		  "=" GCC_OFF_SMALL_ASM() (*uaddr)				\
+		: "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),	\
 		  "i" (-EFAULT)						\
 		: "memory");						\
 	} else								\
@@ -174,8 +174,8 @@
 		"	"__UA_ADDR "\t1b, 4b				\n"
 		"	"__UA_ADDR "\t2b, 4b				\n"
 		"	.previous					\n"
-		: "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
-		: GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+		: "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+		: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
 		  "i" (-EFAULT)
 		: "memory");
 	} else if (cpu_has_llsc) {
@@ -183,12 +183,12 @@
 		"# futex_atomic_cmpxchg_inatomic			\n"
 		"	.set	push					\n"
 		"	.set	noat					\n"
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"1:	"user_ll("%1", "%3")"				\n"
 		"	bne	%1, %z4, 3f				\n"
 		"	.set	mips0					\n"
 		"	move	$1, %z5					\n"
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"2:	"user_sc("$1", "%2")"				\n"
 		"	beqz	$1, 1b					\n"
 		__WEAK_LLSC_MB
@@ -203,8 +203,8 @@
 		"	"__UA_ADDR "\t1b, 4b				\n"
 		"	"__UA_ADDR "\t2b, 4b				\n"
 		"	.previous					\n"
-		: "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
-		: GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+		: "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+		: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
 		  "i" (-EFAULT)
 		: "memory");
 	} else
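
The ".set arch=r4000" annotations around user_ll/user_sc become ".set "MIPS_ISA_ARCH_LEVEL"" because the r4000 arch directive is not accepted when assembling for R6. The macro comes from <asm/compiler.h>; its assumed shape (not shown in this diff) is roughly:

/*
 * Assumed shape only: pick an ISA annotation the assembler accepts for
 * the kernel's target ISA instead of hard-coding "arch=r4000".
 */
#if defined(CONFIG_CPU_MIPSR6)
# define MIPS_ISA_LEVEL		"mips64r6"
# define MIPS_ISA_ARCH_LEVEL	MIPS_ISA_LEVEL
#else
/* MIPS64 is a superset of MIPS32, so the 64-bit level is safe here. */
# define MIPS_ISA_LEVEL		"mips64r2"
# define MIPS_ISA_ARCH_LEVEL	"arch=r4000"
#endif
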
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h
index 4be1a57..71a986e 100644
--- a/arch/mips/include/asm/gio_device.h
+++ b/arch/mips/include/asm/gio_device.h
@@ -25,8 +25,6 @@
 
 	int  (*probe)(struct gio_device *, const struct gio_device_id *);
 	void (*remove)(struct gio_device *);
-	int  (*suspend)(struct gio_device *, pm_message_t);
-	int  (*resume)(struct gio_device *);
 	void (*shutdown)(struct gio_device *);
 
 	struct device_driver driver;
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index e3ee92d..4087b47 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -11,6 +11,7 @@
 #define _ASM_HAZARDS_H
 
 #include <linux/stringify.h>
+#include <asm/compiler.h>
 
 #define ___ssnop							\
 	sll	$0, $0, 1
@@ -21,7 +22,7 @@
 /*
  * TLB hazards
  */
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
 
 /*
  * MIPSR2 defines ehb for hazard avoidance
@@ -58,7 +59,7 @@
 	unsigned long tmp;						\
 									\
 	__asm__ __volatile__(						\
-	"	.set	mips64r2				\n"	\
+	"	.set "MIPS_ISA_LEVEL"				\n"	\
 	"	dla	%0, 1f					\n"	\
 	"	jr.hb	%0					\n"	\
 	"	.set	mips0					\n"	\
@@ -132,7 +133,7 @@
 
 #define instruction_hazard()						\
 do {									\
-	if (cpu_has_mips_r2)						\
+	if (cpu_has_mips_r2_r6)						\
 		__instruction_hazard();					\
 } while (0)
 
@@ -240,7 +241,7 @@
 
 #define __disable_fpu_hazard
 
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 
 #define __enable_fpu_hazard						\
 	___ehb
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 0fa5fdc..d60cc68 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -15,9 +15,10 @@
 
 #include <linux/compiler.h>
 #include <linux/stringify.h>
+#include <asm/compiler.h>
 #include <asm/hazards.h>
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 
 static inline void arch_local_irq_disable(void)
 {
@@ -118,7 +119,7 @@
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
 void __arch_local_irq_restore(unsigned long flags);
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 static inline void arch_local_irq_enable(void)
 {
@@ -126,7 +127,7 @@
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-#if   defined(CONFIG_CPU_MIPSR2)
+#if   defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	"	ei							\n"
 #else
 	"	mfc0	$1,$12						\n"
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 46dfc3c..8feaed6 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -5,6 +5,7 @@
 #include <linux/bitops.h>
 #include <linux/atomic.h>
 #include <asm/cmpxchg.h>
+#include <asm/compiler.h>
 #include <asm/war.h>
 
 typedef struct
@@ -47,7 +48,7 @@
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"1:"	__LL	"%1, %2		# local_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 			__SC	"%0, %2					\n"
@@ -92,7 +93,7 @@
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"1:"	__LL	"%1, %2		# local_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 			__SC	"%0, %2					\n"
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index 1668ee5..cf92fe7 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -8,11 +8,10 @@
 #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
 #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
 
-
-#define CP0_CYCLE_COUNTER $9, 6
 #define CP0_CVMCTL_REG $9, 7
 #define CP0_CVMMEMCTL_REG $11,7
 #define CP0_PRID_REG $15, 0
+#define CP0_DCACHE_ERR_REG $27, 1
 #define CP0_PRID_OCTEON_PASS1 0x000d0000
 #define CP0_PRID_OCTEON_CN30XX 0x000d0200
 
@@ -38,36 +37,55 @@
 	# Needed for octeon specific memcpy
 	or  v0, v0, 0x5001
 	xor v0, v0, 0x1001
-	# Read the processor ID register
-	mfc0 v1, CP0_PRID_REG
-	# Disable instruction prefetching (Octeon Pass1 errata)
-	or  v0, v0, 0x2000
-	# Skip reenable of prefetching for Octeon Pass1
-	beq v1, CP0_PRID_OCTEON_PASS1, skip
-	nop
-	# Reenable instruction prefetching, not on Pass1
-	xor v0, v0, 0x2000
-	# Strip off pass number off of processor id
-	srl v1, 8
-	sll v1, 8
-	# CN30XX needs some extra stuff turned off for better performance
-	bne v1, CP0_PRID_OCTEON_CN30XX, skip
-	nop
-	# CN30XX Use random Icache replacement
-	or  v0, v0, 0x400
-	# CN30XX Disable instruction prefetching
-	or  v0, v0, 0x2000
-skip:
 	# First clear off CvmCtl[IPPCI] bit and move the performance
 	# counters interrupt to IRQ 6
-	li	v1, ~(7 << 7)
+	dli	v1, ~(7 << 7)
 	and	v0, v0, v1
 	ori	v0, v0, (6 << 7)
+
+	mfc0	v1, CP0_PRID_REG
+	and	t1, v1, 0xfff8
+	xor	t1, t1, 0x9000		# 63-P1
+	beqz	t1, 4f
+	and	t1, v1, 0xfff8
+	xor	t1, t1, 0x9008		# 63-P2
+	beqz	t1, 4f
+	and	t1, v1, 0xfff8
+	xor	t1, t1, 0x9100		# 68-P1
+	beqz	t1, 4f
+	and	t1, v1, 0xff00
+	xor	t1, t1, 0x9200		# 66-PX
+	bnez	t1, 5f			# Skip WAR for others.
+	and	t1, v1, 0x00ff
+	slti	t1, t1, 2		# 66-P1.2 and later good.
+	beqz	t1, 5f
+
+4:	# core-16057 work around
+	or	v0, v0, 0x2000		# Set IPREF bit.
+
+5:	# No core-16057 work around
 	# Write the cavium control register
 	dmtc0	v0, CP0_CVMCTL_REG
 	sync
 	# Flush dcache after config change
 	cache	9, 0($0)
+	# Zero all of CVMSEG to make sure parity is correct
+	dli	v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+	dsll	v0, 7
+	beqz	v0, 2f
+1:	dsubu	v0, 8
+	sd	$0, -32768(v0)
+	bnez	v0, 1b
+2:
+	mfc0	v0, CP0_PRID_REG
+	bbit0	v0, 15, 1f
+	# OCTEON II or better have bit 15 set.  Clear the error bits.
+	and	t1, v0, 0xff00
+	dli	v0, 0x9500
+	bge	t1, v0, 1f  # OCTEON III has no DCACHE_ERR_REG COP0
+	dli	v0, 0x27
+	dmtc0	v0, CP0_DCACHE_ERR_REG
+1:
 	# Get my core id
 	rdhwr	v0, $0
 	# Jump the master to kernel_entry
diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h
index eb72b35..35c80be 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/war.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/war.h
@@ -22,4 +22,7 @@
 #define R10000_LLSC_WAR			0
 #define MIPS34K_MISSED_ITLB_WAR		0
 
+#define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR	\
+	OCTEON_IS_MODEL(OCTEON_CN6XXX)
+
 #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index 2e54b4b..90dbe43 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -85,8 +85,8 @@
 	"	"__beqz"%0, 1b				\n"
 	"	nop					\n"
 	"	.set	pop				\n"
-	: "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-	: "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr));
+	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+	: "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -106,8 +106,8 @@
 	"	"__beqz"%0, 1b				\n"
 	"	nop					\n"
 	"	.set	pop				\n"
-	: "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-	: "ir" (mask), GCC_OFF12_ASM() (*addr));
+	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+	: "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -127,8 +127,8 @@
 	"	"__beqz"%0, 1b				\n"
 	"	nop					\n"
 	"	.set	pop				\n"
-	: "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-	: "ir" (~mask), GCC_OFF12_ASM() (*addr));
+	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+	: "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -148,8 +148,8 @@
 	"	"__beqz"%0, 1b				\n"
 	"	nop					\n"
 	"	.set	pop				\n"
-	: "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-	: "ir" (mask), GCC_OFF12_ASM() (*addr));
+	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+	: "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -220,8 +220,8 @@
 	"	.set	arch=r4000			\n"	\
 	"1:	ll	%0, %1	#custom_read_reg32	\n"	\
 	"	.set	pop				\n"	\
-	: "=r" (tmp), "=" GCC_OFF12_ASM() (*address)		\
-	: GCC_OFF12_ASM() (*address))
+	: "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)		\
+	: GCC_OFF_SMALL_ASM() (*address))
 
 #define custom_write_reg32(address, tmp)			\
 	__asm__ __volatile__(					\
@@ -231,7 +231,7 @@
 	"	"__beqz"%0, 1b				\n"	\
 	"	nop					\n"	\
 	"	.set	pop				\n"	\
-	: "=&r" (tmp), "=" GCC_OFF12_ASM() (*address)		\
-	: "0" (tmp), GCC_OFF12_ASM() (*address))
+	: "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)		\
+	: "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
 
 #endif	/* __ASM_REGOPS_H__ */
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
new file mode 100644
index 0000000..60570f2
--- /dev/null
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -0,0 +1,96 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H
+#define __ASM_MIPS_R2_TO_R6_EMUL_H
+
+struct mips_r2_emulator_stats {
+	u64 movs;
+	u64 hilo;
+	u64 muls;
+	u64 divs;
+	u64 dsps;
+	u64 bops;
+	u64 traps;
+	u64 fpus;
+	u64 loads;
+	u64 stores;
+	u64 llsc;
+	u64 dsemul;
+};
+
+struct mips_r2br_emulator_stats {
+	u64 jrs;
+	u64 bltzl;
+	u64 bgezl;
+	u64 bltzll;
+	u64 bgezll;
+	u64 bltzall;
+	u64 bgezall;
+	u64 bltzal;
+	u64 bgezal;
+	u64 beql;
+	u64 bnel;
+	u64 blezl;
+	u64 bgtzl;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+#define MIPS_R2_STATS(M)						\
+do {									\
+	u32 nir;							\
+	int err;							\
+									\
+	preempt_disable();						\
+	__this_cpu_inc(mipsr2emustats.M);				\
+	err = __get_user(nir, (u32 __user *)regs->cp0_epc);		\
+	if (!err) {							\
+		if (nir == BREAK_MATH)					\
+			__this_cpu_inc(mipsr2bdemustats.M);		\
+	}								\
+	preempt_enable();						\
+} while (0)
+
+#define MIPS_R2BR_STATS(M)					\
+do {								\
+	preempt_disable();					\
+	__this_cpu_inc(mipsr2bremustats.M);			\
+	preempt_enable();					\
+} while (0)
+
+#else
+
+#define MIPS_R2_STATS(M)          do { } while (0)
+#define MIPS_R2BR_STATS(M)        do { } while (0)
+
+#endif /* CONFIG_DEBUG_FS */
+
+struct r2_decoder_table {
+	u32     mask;
+	u32     code;
+	int     (*func)(struct pt_regs *regs, u32 inst);
+};
+
+
+extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+			  const char *str);
+
+#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
+static int mipsr2_emulation;
+static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; };
+#else
+/* MIPS R2 Emulator ON/OFF */
+extern int mipsr2_emulation;
+extern int mipsr2_decoder(struct pt_regs *regs, u32 inst);
+#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */
+
+#define NO_R6EMU	(cpu_has_mips_r6 && !mipsr2_emulation)
+
+#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */
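
mipsr2_decoder() and the NO_R6EMU predicate give exception handlers a single switch for deciding whether an R2-only encoding should be emulated on an R6 core or rejected. A hedged sketch of how a caller might use them (the handler name is illustrative):

#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/mips-r2-to-r6-emul.h>

/* Illustrative only: try the R2-to-R6 emulator before signalling. */
static int example_try_r2_emulation(struct pt_regs *regs, u32 inst)
{
	/* On R6 with the emulator disabled, R2 encodings stay illegal. */
	if (NO_R6EMU)
		return -1;

	/* Returns 0 when the instruction was recognised and emulated. */
	return mipsr2_decoder(regs, inst);
}
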
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 5b720d8..fef0044 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -653,6 +653,7 @@
 #define MIPS_CONF5_NF		(_ULCAST_(1) << 0)
 #define MIPS_CONF5_UFR		(_ULCAST_(1) << 2)
 #define MIPS_CONF5_MRP		(_ULCAST_(1) << 3)
+#define MIPS_CONF5_LLB		(_ULCAST_(1) << 4)
 #define MIPS_CONF5_MVH		(_ULCAST_(1) << 5)
 #define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
@@ -1127,6 +1128,8 @@
 #define write_c0_config6(val)	__write_32bit_c0_register($16, 6, val)
 #define write_c0_config7(val)	__write_32bit_c0_register($16, 7, val)
 
+#define read_c0_lladdr()	__read_ulong_c0_register($17, 0)
+#define write_c0_lladdr(val)	__write_ulong_c0_register($17, 0, val)
 #define read_c0_maar()		__read_ulong_c0_register($17, 1)
 #define write_c0_maar(val)	__write_ulong_c0_register($17, 1, val)
 #define read_c0_maari()		__read_32bit_c0_register($17, 2)
@@ -1909,6 +1912,7 @@
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
+__BUILD_SET_C0(pagegrain)
 __BUILD_SET_C0(brcm_config_0)
 __BUILD_SET_C0(brcm_bus_pll)
 __BUILD_SET_C0(brcm_reset)
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index c436138..1afa1f9 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -1,9 +1,12 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#include <linux/atomic.h>
+
 typedef struct {
 	unsigned long asid[NR_CPUS];
 	void *vdso;
+	atomic_t fp_mode_switching;
 } mm_context_t;
 
 #endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 2f82568..45914b5 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -25,7 +25,6 @@
 	if (cpu_has_htw) {						\
 		write_c0_pwbase(pgd);					\
 		back_to_back_c0_hazard();				\
-		htw_reset();						\
 	}								\
 } while (0)
 
@@ -132,6 +131,8 @@
 	for_each_possible_cpu(i)
 		cpu_context(i, mm) = 0;
 
+	atomic_set(&mm->context.fp_mode_switching, 0);
+
 	return 0;
 }
 
@@ -142,6 +143,7 @@
 	unsigned long flags;
 	local_irq_save(flags);
 
+	htw_stop();
 	/* Check if our ASID is of an older version and thus invalid */
 	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
 		get_new_mmu_context(next, cpu);
@@ -154,6 +156,7 @@
 	 */
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));
+	htw_start();
 
 	local_irq_restore(flags);
 }
@@ -180,6 +183,7 @@
 
 	local_irq_save(flags);
 
+	htw_stop();
 	/* Unconditionally get a new ASID.  */
 	get_new_mmu_context(next, cpu);
 
@@ -189,6 +193,7 @@
 	/* mark mmu ownership change */
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));
+	htw_start();
 
 	local_irq_restore(flags);
 }
@@ -203,6 +208,7 @@
 	unsigned long flags;
 
 	local_irq_save(flags);
+	htw_stop();
 
 	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))  {
 		get_new_mmu_context(mm, cpu);
@@ -211,6 +217,7 @@
 		/* will get a new context next time */
 		cpu_context(cpu, mm) = 0;
 	}
+	htw_start();
 	local_irq_restore(flags);
 }
 
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 800fe57..0aaf9a0 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -88,10 +88,14 @@
 #define MODULE_PROC_FAMILY "MIPS32_R1 "
 #elif defined CONFIG_CPU_MIPS32_R2
 #define MODULE_PROC_FAMILY "MIPS32_R2 "
+#elif defined CONFIG_CPU_MIPS32_R6
+#define MODULE_PROC_FAMILY "MIPS32_R6 "
 #elif defined CONFIG_CPU_MIPS64_R1
 #define MODULE_PROC_FAMILY "MIPS64_R1 "
 #elif defined CONFIG_CPU_MIPS64_R2
 #define MODULE_PROC_FAMILY "MIPS64_R2 "
+#elif defined CONFIG_CPU_MIPS64_R6
+#define MODULE_PROC_FAMILY "MIPS64_R6 "
 #elif defined CONFIG_CPU_R3000
 #define MODULE_PROC_FAMILY "R3000 "
 #elif defined CONFIG_CPU_TX39XX
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 75739c8..8d05d90 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -275,7 +275,7 @@
 		" lbu	%[ticket], %[now_serving]\n"
 		"4:\n"
 		".set pop\n" :
-		[ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+		[ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
 		[now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
 		[my_ticket] "=r"(my_ticket)
 	    );
diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
new file mode 100644
index 0000000..0c9c3e7
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
@@ -0,0 +1,306 @@
+/***********************license start***************
+ * Author: Cavium Inc.
+ *
+ * Contact: support@cavium.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2014 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Inc. for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_RST_DEFS_H__
+#define __CVMX_RST_DEFS_H__
+
+#define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull))
+#define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull))
+#define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull))
+#define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8)
+#define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull))
+#define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull))
+#define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull))
+#define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull))
+#define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull))
+#define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull))
+#define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8)
+#define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull))
+
+union cvmx_rst_boot {
+	uint64_t u64;
+	struct cvmx_rst_boot_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t chipkill:1;
+		uint64_t jtcsrdis:1;
+		uint64_t ejtagdis:1;
+		uint64_t romen:1;
+		uint64_t ckill_ppdis:1;
+		uint64_t jt_tstmode:1;
+		uint64_t vrm_err:1;
+		uint64_t reserved_37_56:20;
+		uint64_t c_mul:7;
+		uint64_t pnr_mul:6;
+		uint64_t reserved_21_23:3;
+		uint64_t lboot_oci:3;
+		uint64_t lboot_ext:6;
+		uint64_t lboot:10;
+		uint64_t rboot:1;
+		uint64_t rboot_pin:1;
+#else
+		uint64_t rboot_pin:1;
+		uint64_t rboot:1;
+		uint64_t lboot:10;
+		uint64_t lboot_ext:6;
+		uint64_t lboot_oci:3;
+		uint64_t reserved_21_23:3;
+		uint64_t pnr_mul:6;
+		uint64_t c_mul:7;
+		uint64_t reserved_37_56:20;
+		uint64_t vrm_err:1;
+		uint64_t jt_tstmode:1;
+		uint64_t ckill_ppdis:1;
+		uint64_t romen:1;
+		uint64_t ejtagdis:1;
+		uint64_t jtcsrdis:1;
+		uint64_t chipkill:1;
+#endif
+	} s;
+	struct cvmx_rst_boot_s cn70xx;
+	struct cvmx_rst_boot_s cn70xxp1;
+	struct cvmx_rst_boot_s cn78xx;
+};
+
+union cvmx_rst_cfg {
+	uint64_t u64;
+	struct cvmx_rst_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t bist_delay:58;
+		uint64_t reserved_3_5:3;
+		uint64_t cntl_clr_bist:1;
+		uint64_t warm_clr_bist:1;
+		uint64_t soft_clr_bist:1;
+#else
+		uint64_t soft_clr_bist:1;
+		uint64_t warm_clr_bist:1;
+		uint64_t cntl_clr_bist:1;
+		uint64_t reserved_3_5:3;
+		uint64_t bist_delay:58;
+#endif
+	} s;
+	struct cvmx_rst_cfg_s cn70xx;
+	struct cvmx_rst_cfg_s cn70xxp1;
+	struct cvmx_rst_cfg_s cn78xx;
+};
+
+union cvmx_rst_ckill {
+	uint64_t u64;
+	struct cvmx_rst_ckill_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_47_63:17;
+		uint64_t timer:47;
+#else
+		uint64_t timer:47;
+		uint64_t reserved_47_63:17;
+#endif
+	} s;
+	struct cvmx_rst_ckill_s cn70xx;
+	struct cvmx_rst_ckill_s cn70xxp1;
+	struct cvmx_rst_ckill_s cn78xx;
+};
+
+union cvmx_rst_ctlx {
+	uint64_t u64;
+	struct cvmx_rst_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_10_63:54;
+		uint64_t prst_link:1;
+		uint64_t rst_done:1;
+		uint64_t rst_link:1;
+		uint64_t host_mode:1;
+		uint64_t reserved_4_5:2;
+		uint64_t rst_drv:1;
+		uint64_t rst_rcv:1;
+		uint64_t rst_chip:1;
+		uint64_t rst_val:1;
+#else
+		uint64_t rst_val:1;
+		uint64_t rst_chip:1;
+		uint64_t rst_rcv:1;
+		uint64_t rst_drv:1;
+		uint64_t reserved_4_5:2;
+		uint64_t host_mode:1;
+		uint64_t rst_link:1;
+		uint64_t rst_done:1;
+		uint64_t prst_link:1;
+		uint64_t reserved_10_63:54;
+#endif
+	} s;
+	struct cvmx_rst_ctlx_s cn70xx;
+	struct cvmx_rst_ctlx_s cn70xxp1;
+	struct cvmx_rst_ctlx_s cn78xx;
+};
+
+union cvmx_rst_delay {
+	uint64_t u64;
+	struct cvmx_rst_delay_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_32_63:32;
+		uint64_t warm_rst_dly:16;
+		uint64_t soft_rst_dly:16;
+#else
+		uint64_t soft_rst_dly:16;
+		uint64_t warm_rst_dly:16;
+		uint64_t reserved_32_63:32;
+#endif
+	} s;
+	struct cvmx_rst_delay_s cn70xx;
+	struct cvmx_rst_delay_s cn70xxp1;
+	struct cvmx_rst_delay_s cn78xx;
+};
+
+union cvmx_rst_eco {
+	uint64_t u64;
+	struct cvmx_rst_eco_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_32_63:32;
+		uint64_t eco_rw:32;
+#else
+		uint64_t eco_rw:32;
+		uint64_t reserved_32_63:32;
+#endif
+	} s;
+	struct cvmx_rst_eco_s cn78xx;
+};
+
+union cvmx_rst_int {
+	uint64_t u64;
+	struct cvmx_rst_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_12_63:52;
+		uint64_t perst:4;
+		uint64_t reserved_4_7:4;
+		uint64_t rst_link:4;
+#else
+		uint64_t rst_link:4;
+		uint64_t reserved_4_7:4;
+		uint64_t perst:4;
+		uint64_t reserved_12_63:52;
+#endif
+	} s;
+	struct cvmx_rst_int_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_11_63:53;
+		uint64_t perst:3;
+		uint64_t reserved_3_7:5;
+		uint64_t rst_link:3;
+#else
+		uint64_t rst_link:3;
+		uint64_t reserved_3_7:5;
+		uint64_t perst:3;
+		uint64_t reserved_11_63:53;
+#endif
+	} cn70xx;
+	struct cvmx_rst_int_cn70xx cn70xxp1;
+	struct cvmx_rst_int_s cn78xx;
+};
+
+union cvmx_rst_ocx {
+	uint64_t u64;
+	struct cvmx_rst_ocx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_3_63:61;
+		uint64_t rst_link:3;
+#else
+		uint64_t rst_link:3;
+		uint64_t reserved_3_63:61;
+#endif
+	} s;
+	struct cvmx_rst_ocx_s cn78xx;
+};
+
+union cvmx_rst_power_dbg {
+	uint64_t u64;
+	struct cvmx_rst_power_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_3_63:61;
+		uint64_t str:3;
+#else
+		uint64_t str:3;
+		uint64_t reserved_3_63:61;
+#endif
+	} s;
+	struct cvmx_rst_power_dbg_s cn78xx;
+};
+
+union cvmx_rst_pp_power {
+	uint64_t u64;
+	struct cvmx_rst_pp_power_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_48_63:16;
+		uint64_t gate:48;
+#else
+		uint64_t gate:48;
+		uint64_t reserved_48_63:16;
+#endif
+	} s;
+	struct cvmx_rst_pp_power_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_4_63:60;
+		uint64_t gate:4;
+#else
+		uint64_t gate:4;
+		uint64_t reserved_4_63:60;
+#endif
+	} cn70xx;
+	struct cvmx_rst_pp_power_cn70xx cn70xxp1;
+	struct cvmx_rst_pp_power_s cn78xx;
+};
+
+union cvmx_rst_soft_prstx {
+	uint64_t u64;
+	struct cvmx_rst_soft_prstx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_1_63:63;
+		uint64_t soft_prst:1;
+#else
+		uint64_t soft_prst:1;
+		uint64_t reserved_1_63:63;
+#endif
+	} s;
+	struct cvmx_rst_soft_prstx_s cn70xx;
+	struct cvmx_rst_soft_prstx_s cn70xxp1;
+	struct cvmx_rst_soft_prstx_s cn78xx;
+};
+
+union cvmx_rst_soft_rst {
+	uint64_t u64;
+	struct cvmx_rst_soft_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_1_63:63;
+		uint64_t soft_rst:1;
+#else
+		uint64_t soft_rst:1;
+		uint64_t reserved_1_63:63;
+#endif
+	} s;
+	struct cvmx_rst_soft_rst_s cn70xx;
+	struct cvmx_rst_soft_rst_s cn70xxp1;
+	struct cvmx_rst_soft_rst_s cn78xx;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index e8a1c2f..92b377e 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -45,6 +45,7 @@
  */
 
 #define OCTEON_FAMILY_MASK	0x00ffff00
+#define OCTEON_PRID_MASK	0x00ffffff
 
 /* Flag bits in top byte */
 /* Ignores revision in model checks */
@@ -63,11 +64,52 @@
 #define OM_MATCH_6XXX_FAMILY_MODELS	0x40000000
 /* Match all cnf7XXX Octeon models. */
 #define OM_MATCH_F7XXX_FAMILY_MODELS	0x80000000
+/* Match all cn7XXX Octeon models. */
+#define OM_MATCH_7XXX_FAMILY_MODELS     0x10000000
+#define OM_MATCH_FAMILY_MODELS		(OM_MATCH_5XXX_FAMILY_MODELS |	\
+					 OM_MATCH_6XXX_FAMILY_MODELS |	\
+					 OM_MATCH_F7XXX_FAMILY_MODELS | \
+					 OM_MATCH_7XXX_FAMILY_MODELS)
+/*
+ * CN7XXX models with new revision encoding
+ */
+
+#define OCTEON_CN73XX_PASS1_0	0x000d9700
+#define OCTEON_CN73XX		(OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN73XX_PASS1_X	(OCTEON_CN73XX_PASS1_0 | \
+				 OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN70XX_PASS1_0	0x000d9600
+#define OCTEON_CN70XX_PASS1_1	0x000d9601
+#define OCTEON_CN70XX_PASS1_2	0x000d9602
+
+#define OCTEON_CN70XX_PASS2_0	0x000d9608
+
+#define OCTEON_CN70XX		(OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN70XX_PASS1_X	(OCTEON_CN70XX_PASS1_0 | \
+				 OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN70XX_PASS2_X	(OCTEON_CN70XX_PASS2_0 | \
+				 OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN71XX		OCTEON_CN70XX
+
+#define OCTEON_CN78XX_PASS1_0	0x000d9500
+#define OCTEON_CN78XX_PASS1_1	0x000d9501
+#define OCTEON_CN78XX_PASS2_0	0x000d9508
+
+#define OCTEON_CN78XX		(OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN78XX_PASS1_X	(OCTEON_CN78XX_PASS1_0 | \
+				 OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN78XX_PASS2_X	(OCTEON_CN78XX_PASS2_0 | \
+				 OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN76XX		(0x000d9540 | OM_CHECK_SUBMODEL)
 
 /*
  * CNF7XXX models with new revision encoding
  */
 #define OCTEON_CNF71XX_PASS1_0	0x000d9400
+#define OCTEON_CNF71XX_PASS1_1  0x000d9401
 
 #define OCTEON_CNF71XX		(OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
 #define OCTEON_CNF71XX_PASS1_X	(OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
@@ -79,6 +121,8 @@
 #define OCTEON_CN68XX_PASS1_1	0x000d9101
 #define OCTEON_CN68XX_PASS1_2	0x000d9102
 #define OCTEON_CN68XX_PASS2_0	0x000d9108
+#define OCTEON_CN68XX_PASS2_1   0x000d9109
+#define OCTEON_CN68XX_PASS2_2   0x000d910a
 
 #define OCTEON_CN68XX		(OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN68XX_PASS1_X	(OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
@@ -104,11 +148,18 @@
 #define OCTEON_CN63XX_PASS1_X	(OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN63XX_PASS2_X	(OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
 
+/* CN62XX is the same as CN63XX with 1 MB cache */
+#define OCTEON_CN62XX           OCTEON_CN63XX
+
 #define OCTEON_CN61XX_PASS1_0	0x000d9300
+#define OCTEON_CN61XX_PASS1_1   0x000d9301
 
 #define OCTEON_CN61XX		(OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN61XX_PASS1_X	(OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 
+/* CN60XX is the same as CN61XX with 512 KB cache */
+#define OCTEON_CN60XX           OCTEON_CN61XX
+
 /*
  * CN5XXX models with new revision encoding
  */
@@ -120,7 +171,7 @@
 #define OCTEON_CN58XX_PASS2_2	0x000d030a
 #define OCTEON_CN58XX_PASS2_3	0x000d030b
 
-#define OCTEON_CN58XX		(OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX		(OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN58XX_PASS1_X	(OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN58XX_PASS2_X	(OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN58XX_PASS1	OCTEON_CN58XX_PASS1_X
@@ -217,12 +268,10 @@
 #define OCTEON_CN3XXX		(OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
 #define OCTEON_CN5XXX		(OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
 #define OCTEON_CN6XXX		(OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
-
-/* These are used to cover entire families of OCTEON processors */
-#define OCTEON_FAM_1		(OCTEON_CN3XXX)
-#define OCTEON_FAM_PLUS		(OCTEON_CN5XXX)
-#define OCTEON_FAM_1_PLUS	(OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS)
-#define OCTEON_FAM_2		(OCTEON_CN6XXX)
+#define OCTEON_CNF7XXX		(OCTEON_CNF71XX_PASS1_0 | \
+				 OM_MATCH_F7XXX_FAMILY_MODELS)
+#define OCTEON_CN7XXX		(OCTEON_CN78XX_PASS1_0 | \
+				 OM_MATCH_7XXX_FAMILY_MODELS)
 
 /* The revision byte (low byte) has two different encodings.
  * CN3XXX:
@@ -232,7 +281,7 @@
  *     <4>:   alternate package
  *     <3:0>: revision
  *
- * CN5XXX:
+ * CN5XXX and older models:
  *
  *     bits
  *     <7>:   reserved (0)
@@ -251,17 +300,21 @@
 /* CN5XXX and later use different layout of bits in the revision ID field */
 #define OCTEON_58XX_FAMILY_MASK	     OCTEON_38XX_FAMILY_MASK
 #define OCTEON_58XX_FAMILY_REV_MASK  0x00ffff3f
-#define OCTEON_58XX_MODEL_MASK	     0x00ffffc0
+#define OCTEON_58XX_MODEL_MASK	     0x00ffff40
 #define OCTEON_58XX_MODEL_REV_MASK   (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
-#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
+#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38)
 #define OCTEON_5XXX_MODEL_MASK	     0x00ff0fc0
 
-/* forward declarations */
 static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
 static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
 
 #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
 
+/*
+ * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)
+ * returns true if chip_model is identical to or belongs to the OCTEON
+ * model group specified in arg_model.
+ */
 /* NOTE: This for internal use only! */
 #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)		\
 ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0)  && ( \
@@ -286,11 +339,18 @@
 		((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
 			&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \
 		((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL)	\
-			&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \
+			&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
 		((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \
-			&& ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \
+			&& ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \
+			&& ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \
 		((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \
-			&& ((chip_model) >= OCTEON_CN63XX_PASS1_0)) ||	\
+			&& ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \
+			&& ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \
+		((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \
+			&& ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \
+			&& ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \
+		((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \
+			&& ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \
 		((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
 			&& (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \
 		)))
@@ -300,14 +360,6 @@
 {
 	uint32_t cpuid = cvmx_get_proc_id();
 
-	/*
-	 * Check for special case of mismarked 3005 samples. We only
-	 * need to check if the sub model isn't being ignored
-	 */
-	if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) {
-		if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
-			cpuid |= 0x10;
-	}
 	return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
 }
 
@@ -326,10 +378,21 @@
 #define OCTEON_IS_COMMON_BINARY() 1
 #undef OCTEON_MODEL
 
+#define OCTEON_IS_OCTEON1()	OCTEON_IS_MODEL(OCTEON_CN3XXX)
+#define OCTEON_IS_OCTEONPLUS()	OCTEON_IS_MODEL(OCTEON_CN5XXX)
+#define OCTEON_IS_OCTEON2()						\
+	(OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+
+#define OCTEON_IS_OCTEON3()	OCTEON_IS_MODEL(OCTEON_CN7XXX)
+
+#define OCTEON_IS_OCTEON1PLUS()	(OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS())
+
 const char *__init octeon_model_get_string(uint32_t chip_id);
 
 /*
  * Return the octeon family, i.e., ProcessorID of the PrID register.
+ *
+ * @return the octeon family on success, ((uint32_t)-1) on error.
  */
 static inline uint32_t cvmx_get_octeon_family(void)
 {
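
The extra model and family definitions make it possible to distinguish OCTEON III (cn7xxx) parts with the same macros used for earlier generations. A small illustrative helper built only from the macros added above:

#include <asm/octeon/octeon-model.h>

/* Illustrative only: name the OCTEON generation we are running on. */
static const char *example_octeon_generation(void)
{
	if (OCTEON_IS_OCTEON3())
		return "OCTEON III (cn7xxx)";
	if (OCTEON_IS_OCTEON2())
		return "OCTEON II (cn6xxx/cnf71xx)";
	if (OCTEON_IS_OCTEON1PLUS())
		return "OCTEON / OCTEON Plus";
	return "unknown";
}
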
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 6dfefd2..0415965 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -9,6 +9,7 @@
 #define __ASM_OCTEON_OCTEON_H
 
 #include <asm/octeon/cvmx.h>
+#include <asm/bitfield.h>
 
 extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
 						uint64_t alignment,
@@ -53,6 +54,7 @@
 #define OCTOEN_SERIAL_LEN	20
 
 struct octeon_boot_descriptor {
+#ifdef __BIG_ENDIAN_BITFIELD
 	/* Start of block referenced by assembly code - do not change! */
 	uint32_t desc_version;
 	uint32_t desc_size;
@@ -104,77 +106,149 @@
 	uint8_t mac_addr_base[6];
 	uint8_t mac_addr_count;
 	uint64_t cvmx_desc_vaddr;
+#else
+	uint32_t desc_size;
+	uint32_t desc_version;
+	uint64_t stack_top;
+	uint64_t heap_base;
+	uint64_t heap_end;
+	/* Only used by bootloader */
+	uint64_t entry_point;
+	uint64_t desc_vaddr;
+	/* End of this block referenced by assembly code - do not change! */
+	uint32_t stack_size;
+	uint32_t exception_base_addr;
+	uint32_t argc;
+	uint32_t heap_size;
+	/*
+	 * Argc count for application.
+	 * Warning low bit scrambled in little-endian.
+	 */
+	uint32_t argv[OCTEON_ARGV_MAX_ARGS];
+
+#define  BOOT_FLAG_INIT_CORE		(1 << 0)
+#define  OCTEON_BL_FLAG_DEBUG		(1 << 1)
+#define  OCTEON_BL_FLAG_NO_MAGIC	(1 << 2)
+	/* If set, use uart1 for console */
+#define  OCTEON_BL_FLAG_CONSOLE_UART1	(1 << 3)
+	/* If set, use PCI console */
+#define  OCTEON_BL_FLAG_CONSOLE_PCI	(1 << 4)
+	/* Call exit on break on serial port */
+#define  OCTEON_BL_FLAG_BREAK		(1 << 5)
+
+	uint32_t core_mask;
+	uint32_t flags;
+	/* physical address of free memory descriptor block. */
+	uint32_t phy_mem_desc_addr;
+	/* DRAM size in megabytes. */
+	uint32_t dram_size;
+	/* CPU clock speed, in hz. */
+	uint32_t eclock_hz;
+	/* used to pass flags from app to debugger. */
+	uint32_t debugger_flags_base_addr;
+	/* SPI4 clock in hz. */
+	uint32_t spi_clock_hz;
+	/* DRAM clock speed, in hz. */
+	uint32_t dclock_hz;
+	uint8_t chip_rev_minor;
+	uint8_t chip_rev_major;
+	uint16_t chip_type;
+	uint8_t board_rev_minor;
+	uint8_t board_rev_major;
+	uint16_t board_type;
+
+	uint64_t unused1[4]; /* Not even filled in by bootloader. */
+
+	uint64_t cvmx_desc_vaddr;
+#endif
 };
 
 union octeon_cvmemctl {
 	uint64_t u64;
 	struct {
 		/* RO 1 = BIST fail, 0 = BIST pass */
-		uint64_t tlbbist:1;
+		__BITFIELD_FIELD(uint64_t tlbbist:1,
 		/* RO 1 = BIST fail, 0 = BIST pass */
-		uint64_t l1cbist:1;
+		__BITFIELD_FIELD(uint64_t l1cbist:1,
 		/* RO 1 = BIST fail, 0 = BIST pass */
-		uint64_t l1dbist:1;
+		__BITFIELD_FIELD(uint64_t l1dbist:1,
 		/* RO 1 = BIST fail, 0 = BIST pass */
-		uint64_t dcmbist:1;
+		__BITFIELD_FIELD(uint64_t dcmbist:1,
 		/* RO 1 = BIST fail, 0 = BIST pass */
-		uint64_t ptgbist:1;
+		__BITFIELD_FIELD(uint64_t ptgbist:1,
 		/* RO 1 = BIST fail, 0 = BIST pass */
-		uint64_t wbfbist:1;
+		__BITFIELD_FIELD(uint64_t wbfbist:1,
 		/* Reserved */
-		uint64_t reserved:22;
+		__BITFIELD_FIELD(uint64_t reserved:17,
+		/* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU.
+		 * This field selects between the TLB replacement policies:
+		 * bitmask LRU or NLU. Bitmask LRU maintains a mask of
+		 * recently used TLB entries and avoids them as new entries
+		 * are allocated. NLU simply guarantees that the next
+		 * allocation is not the last used TLB entry. */
+		__BITFIELD_FIELD(uint64_t tlbnlu:1,
+		/* OCTEON II - Selects the bit in the counter used for
+		 * releasing a PAUSE. This counter trips every 2^(8+PAUSETIME)
+		 * cycles. If not already released, the cnMIPS II core will
+		 * always release a given PAUSE instruction within
+		 * 2^(8+PAUSETIME). If the counter trip happens to line up,
+		 * the cnMIPS II core may release the PAUSE instantly. */
+		__BITFIELD_FIELD(uint64_t pausetime:3,
+		/* OCTEON II - This field is an extension of
+		 * CvmMemCtl[DIDTTO] */
+		__BITFIELD_FIELD(uint64_t didtto2:1,
 		/* R/W If set, marked write-buffer entries time out
 		 * the same as as other entries; if clear, marked
 		 * write-buffer entries use the maximum timeout. */
-		uint64_t dismarkwblongto:1;
+		__BITFIELD_FIELD(uint64_t dismarkwblongto:1,
 		/* R/W If set, a merged store does not clear the
 		 * write-buffer entry timeout state. */
-		uint64_t dismrgclrwbto:1;
+		__BITFIELD_FIELD(uint64_t dismrgclrwbto:1,
 		/* R/W Two bits that are the MSBs of the resultant
 		 * CVMSEG LM word location for an IOBDMA. The other 8
 		 * bits come from the SCRADDR field of the IOBDMA. */
-		uint64_t iobdmascrmsb:2;
+		__BITFIELD_FIELD(uint64_t iobdmascrmsb:2,
 		/* R/W If set, SYNCWS and SYNCS only order marked
 		 * stores; if clear, SYNCWS and SYNCS only order
 		 * unmarked stores. SYNCWSMARKED has no effect when
 		 * DISSYNCWS is set. */
-		uint64_t syncwsmarked:1;
+		__BITFIELD_FIELD(uint64_t syncwsmarked:1,
 		/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
 		 * SYNC. */
-		uint64_t dissyncws:1;
+		__BITFIELD_FIELD(uint64_t dissyncws:1,
 		/* R/W If set, no stall happens on write buffer
 		 * full. */
-		uint64_t diswbfst:1;
+		__BITFIELD_FIELD(uint64_t diswbfst:1,
 		/* R/W If set (and SX set), supervisor-level
 		 * loads/stores can use XKPHYS addresses with
 		 * VA<48>==0 */
-		uint64_t xkmemenas:1;
+		__BITFIELD_FIELD(uint64_t xkmemenas:1,
 		/* R/W If set (and UX set), user-level loads/stores
 		 * can use XKPHYS addresses with VA<48>==0 */
-		uint64_t xkmemenau:1;
+		__BITFIELD_FIELD(uint64_t xkmemenau:1,
 		/* R/W If set (and SX set), supervisor-level
 		 * loads/stores can use XKPHYS addresses with
 		 * VA<48>==1 */
-		uint64_t xkioenas:1;
+		__BITFIELD_FIELD(uint64_t xkioenas:1,
 		/* R/W If set (and UX set), user-level loads/stores
 		 * can use XKPHYS addresses with VA<48>==1 */
-		uint64_t xkioenau:1;
+		__BITFIELD_FIELD(uint64_t xkioenau:1,
 		/* R/W If set, all stores act as SYNCW (NOMERGE must
 		 * be set when this is set) RW, reset to 0. */
-		uint64_t allsyncw:1;
+		__BITFIELD_FIELD(uint64_t allsyncw:1,
 		/* R/W If set, no stores merge, and all stores reach
 		 * the coherent bus in order. */
-		uint64_t nomerge:1;
+		__BITFIELD_FIELD(uint64_t nomerge:1,
 		/* R/W Selects the bit in the counter used for DID
 		 * time-outs 0 = 231, 1 = 230, 2 = 229, 3 =
 		 * 214. Actual time-out is between 1x and 2x this
 		 * interval. For example, with DIDTTO=3, expiration
 		 * interval is between 16K and 32K. */
-		uint64_t didtto:2;
+		__BITFIELD_FIELD(uint64_t didtto:2,
 		/* R/W If set, the (mem) CSR clock never turns off. */
-		uint64_t csrckalwys:1;
+		__BITFIELD_FIELD(uint64_t csrckalwys:1,
 		/* R/W If set, mclk never turns off. */
-		uint64_t mclkalwys:1;
+		__BITFIELD_FIELD(uint64_t mclkalwys:1,
 		/* R/W Selects the bit in the counter used for write
 		 * buffer flush time-outs (WBFLT+11) is the bit
 		 * position in an internal counter used to determine
@@ -182,25 +256,26 @@
 		 * 2x this interval. For example, with WBFLT = 0, a
 		 * write buffer expires between 2K and 4K cycles after
 		 * the write buffer entry is allocated. */
-		uint64_t wbfltime:3;
+		__BITFIELD_FIELD(uint64_t wbfltime:3,
 		/* R/W If set, do not put Istream in the L2 cache. */
-		uint64_t istrnol2:1;
+		__BITFIELD_FIELD(uint64_t istrnol2:1,
 		/* R/W The write buffer threshold. */
-		uint64_t wbthresh:4;
+		__BITFIELD_FIELD(uint64_t wbthresh:4,
 		/* Reserved */
-		uint64_t reserved2:2;
+		__BITFIELD_FIELD(uint64_t reserved2:2,
 		/* R/W If set, CVMSEG is available for loads/stores in
 		 * kernel/debug mode. */
-		uint64_t cvmsegenak:1;
+		__BITFIELD_FIELD(uint64_t cvmsegenak:1,
 		/* R/W If set, CVMSEG is available for loads/stores in
 		 * supervisor mode. */
-		uint64_t cvmsegenas:1;
+		__BITFIELD_FIELD(uint64_t cvmsegenas:1,
 		/* R/W If set, CVMSEG is available for loads/stores in
 		 * user mode. */
-		uint64_t cvmsegenau:1;
+		__BITFIELD_FIELD(uint64_t cvmsegenau:1,
 		/* R/W Size of local memory in cache blocks, 54 (6912
 		 * bytes) is max legal value. */
-		uint64_t lmemsz:6;
+		__BITFIELD_FIELD(uint64_t lmemsz:6,
+		;)))))))))))))))))))))))))))))))))
 	} s;
 };
 
@@ -224,6 +299,19 @@
 	cvmx_read64_uint32(address ^ 4);
 }
 
+/* Octeon multiplier save/restore routines from octeon_switch.S */
+void octeon_mult_save(void);
+void octeon_mult_restore(void);
+void octeon_mult_save_end(void);
+void octeon_mult_restore_end(void);
+void octeon_mult_save3(void);
+void octeon_mult_save3_end(void);
+void octeon_mult_save2(void);
+void octeon_mult_save2_end(void);
+void octeon_mult_restore3(void);
+void octeon_mult_restore3_end(void);
+void octeon_mult_restore2(void);
+void octeon_mult_restore2_end(void);
 
 /**
  * Read a 32bit value from the Octeon NPI register space
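
Rewriting the cvmemctl bitfields with __BITFIELD_FIELD lets one struct body serve both bit orders instead of duplicating the whole field list under __BIG_ENDIAN_BITFIELD. The helper comes from the newly included <asm/bitfield.h>; its assumed shape (not shown here) is:

/*
 * Assumed shape only: declare "field" first on big-endian and last on
 * little-endian, so the nested invocations unwind into the right order.
 */
#ifdef __MIPSEB__
#define __BITFIELD_FIELD(field, more)	field; more
#elif defined(__MIPSEL__)
#define __BITFIELD_FIELD(field, more)	more field;
#else
#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
#endif
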
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 6952962..193b4c6 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -121,6 +121,7 @@
 }
 #endif
 
+#ifdef CONFIG_PCI_DOMAINS
 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
 
 static inline int pci_proc_domain(struct pci_bus *bus)
@@ -128,6 +129,7 @@
 	struct pci_controller *hose = bus->sysdata;
 	return hose->need_domain_info;
 }
+#endif /* CONFIG_PCI_DOMAINS */
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index fc807aa..91747c2 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -35,7 +35,7 @@
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
 /*
- * The following bits are directly used by the TLB hardware
+ * The following bits are implemented by the TLB hardware
  */
 #define _PAGE_GLOBAL_SHIFT	0
 #define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
@@ -60,43 +60,40 @@
 #define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
 #define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)
 
-#define _PAGE_SILENT_READ	_PAGE_VALID
-#define _PAGE_SILENT_WRITE	_PAGE_DIRTY
-
 #define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
 
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 /*
- * The following are implemented by software
+ * The following bits are implemented in software
  */
-#define _PAGE_PRESENT_SHIFT	0
-#define _PAGE_PRESENT		(1 <<  _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT	1
-#define _PAGE_READ		(1 <<  _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT	2
-#define _PAGE_WRITE		(1 <<  _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT	3
-#define _PAGE_ACCESSED		(1 <<  _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT	4
-#define _PAGE_MODIFIED		(1 <<  _PAGE_MODIFIED_SHIFT)
+#define _PAGE_PRESENT_SHIFT	(0)
+#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)
 
 /*
- * And these are the hardware TLB bits
+ * The following bits are implemented by the TLB hardware
  */
-#define _PAGE_GLOBAL_SHIFT	8
-#define _PAGE_GLOBAL		(1 <<  _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT	9
-#define _PAGE_VALID		(1 <<  _PAGE_VALID_SHIFT)
-#define _PAGE_SILENT_READ	(1 <<  _PAGE_VALID_SHIFT)	/* synonym  */
-#define _PAGE_DIRTY_SHIFT	10
+#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 4)
+#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
+#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
 #define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
-#define _PAGE_SILENT_WRITE	(1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_UNCACHED_SHIFT	11
+#define _CACHE_UNCACHED_SHIFT	(_PAGE_DIRTY_SHIFT + 1)
 #define _CACHE_UNCACHED		(1 << _CACHE_UNCACHED_SHIFT)
-#define _CACHE_MASK		(1 << _CACHE_UNCACHED_SHIFT)
+#define _CACHE_MASK		_CACHE_UNCACHED
 
-#else /* 'Normal' r4K case */
+#define _PFN_SHIFT		PAGE_SHIFT
+
+#else
 /*
  * When using the RI/XI bit support, we have 13 bits of flags below
  * the physical address. The RI/XI bits are placed such that a SRL 5
@@ -107,10 +104,8 @@
 
 /*
  * The following bits are implemented in software
- *
- * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
  */
-#define _PAGE_PRESENT_SHIFT	(0)
+#define _PAGE_PRESENT_SHIFT	0
 #define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
 #define _PAGE_READ_SHIFT	(cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
 #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
@@ -125,16 +120,11 @@
 /* huge tlb page */
 #define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
-#else
-#define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE		({BUG(); 1; })	/* Dummy value */
-#endif
-
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-/* huge tlb page */
 #define _PAGE_SPLITTING_SHIFT	(_PAGE_HUGE_SHIFT + 1)
 #define _PAGE_SPLITTING		(1 << _PAGE_SPLITTING_SHIFT)
 #else
+#define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT)
+#define _PAGE_HUGE		({BUG(); 1; })	/* Dummy value */
 #define _PAGE_SPLITTING_SHIFT	(_PAGE_HUGE_SHIFT)
 #define _PAGE_SPLITTING		({BUG(); 1; })	/* Dummy value */
 #endif
@@ -149,17 +139,10 @@
 
 #define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-
 #define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
-/* synonym		   */
-#define _PAGE_SILENT_READ	(_PAGE_VALID)
-
-/* The MIPS dirty bit	   */
 #define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
 #define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
-#define _PAGE_SILENT_WRITE	(_PAGE_DIRTY)
-
 #define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
 #define _CACHE_MASK		(7 << _CACHE_SHIFT)
 
@@ -167,9 +150,9 @@
 
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
 
-#ifndef _PFN_SHIFT
-#define _PFN_SHIFT		    PAGE_SHIFT
-#endif
+#define _PAGE_SILENT_READ	_PAGE_VALID
+#define _PAGE_SILENT_WRITE	_PAGE_DIRTY
+
 #define _PFN_MASK		(~((1 << (_PFN_SHIFT)) - 1))
 
 #ifndef _PAGE_NO_READ
@@ -179,9 +162,6 @@
 #ifndef _PAGE_NO_EXEC
 #define _PAGE_NO_EXEC ({BUG(); 0; })
 #endif
-#ifndef _PAGE_GLOBAL_SHIFT
-#define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL)
-#endif
 
 
 #ifndef __ASSEMBLY__
@@ -266,8 +246,9 @@
 #endif
 
 #define __READABLE	(_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
-#define __WRITEABLE	(_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
+#define __WRITEABLE	(_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
 
-#define _PAGE_CHG_MASK	(_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#define _PAGE_CHG_MASK	(_PAGE_ACCESSED | _PAGE_MODIFIED |	\
+			 _PFN_MASK | _CACHE_MASK)
 
 #endif /* _ASM_PGTABLE_BITS_H */
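
Deriving each shift from the previous one keeps the R3000/TX39 layout on the traditional bit positions: _PAGE_MODIFIED_SHIFT works out to 4, so _PAGE_GLOBAL_SHIFT is 4 + 4 = 8, _PAGE_VALID_SHIFT 9, _PAGE_DIRTY_SHIFT 10 and _CACHE_UNCACHED_SHIFT 11, exactly the constants the old hard-coded definitions used. A compile-time cross-check one could drop into a test build (purely illustrative):

#include <linux/bug.h>
#include <asm/pgtable-bits.h>

/* Illustrative only: the derived R3000 shifts match the old fixed values. */
static inline void example_check_r3k_page_bits(void)
{
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	BUILD_BUG_ON(_PAGE_GLOBAL_SHIFT != 8);
	BUILD_BUG_ON(_PAGE_VALID_SHIFT != 9);
	BUILD_BUG_ON(_PAGE_DIRTY_SHIFT != 10);
	BUILD_BUG_ON(_CACHE_UNCACHED_SHIFT != 11);
#endif
}
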
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 583ff42..bef782c 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -99,29 +99,35 @@
 
 #define htw_stop()							\
 do {									\
-	if (cpu_has_htw)						\
-		write_c0_pwctl(read_c0_pwctl() &			\
-			       ~(1 << MIPS_PWCTL_PWEN_SHIFT));		\
+	unsigned long flags;						\
+									\
+	if (cpu_has_htw) {						\
+		local_irq_save(flags);					\
+		if (!raw_current_cpu_data.htw_seq++) {			\
+			write_c0_pwctl(read_c0_pwctl() &		\
+				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
+			back_to_back_c0_hazard();			\
+		}							\
+		local_irq_restore(flags);				\
+	}								\
 } while(0)
 
 #define htw_start()							\
 do {									\
-	if (cpu_has_htw)						\
-		write_c0_pwctl(read_c0_pwctl() |			\
-			       (1 << MIPS_PWCTL_PWEN_SHIFT));		\
-} while(0)
-
-
-#define htw_reset()							\
-do {									\
+	unsigned long flags;						\
+									\
 	if (cpu_has_htw) {						\
-		htw_stop();						\
-		back_to_back_c0_hazard();				\
-		htw_start();						\
-		back_to_back_c0_hazard();				\
+		local_irq_save(flags);					\
+		if (!--raw_current_cpu_data.htw_seq) {			\
+			write_c0_pwctl(read_c0_pwctl() |		\
+				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
+			back_to_back_c0_hazard();			\
+		}							\
+		local_irq_restore(flags);				\
 	}								\
 } while(0)
 
+
 extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 	pte_t pteval);
 
@@ -153,12 +159,13 @@
 {
 	pte_t null = __pte(0);
 
+	htw_stop();
 	/* Preserve global status for the pair */
 	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
 		null.pte_low = null.pte_high = _PAGE_GLOBAL;
 
 	set_pte_at(mm, addr, ptep, null);
-	htw_reset();
+	htw_start();
 }
 #else
 
@@ -188,6 +195,7 @@
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+	htw_stop();
 #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
 	/* Preserve global status for the pair */
 	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
@@ -195,7 +203,7 @@
 	else
 #endif
 		set_pte_at(mm, addr, ptep, __pte(0));
-	htw_reset();
+	htw_start();
 }
 #endif
 
@@ -334,7 +342,7 @@
 	return pte;
 }
 
-#ifdef _PAGE_HUGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }
 
 static inline pte_t pte_mkhuge(pte_t pte)
@@ -342,7 +350,7 @@
 	pte_val(pte) |= _PAGE_HUGE;
 	return pte;
 }
-#endif /* _PAGE_HUGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 #endif
 static inline int pte_special(pte_t pte)	{ return 0; }
 static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
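
htw_stop() and htw_start() are now reference counted through htw_seq, so only the outermost stop disables the hardware page table walker and only the matching outermost start re-enables it; nested pairs, such as the ones pte_clear() now issues internally, become cheap counter updates. A minimal sketch of the calling pattern this permits (the wrapper is purely illustrative):

#include <asm/pgtable.h>

/*
 * Illustrative only: the caller pauses the hardware walker around a
 * helper that itself uses htw_stop()/htw_start(); the walker is only
 * re-enabled when the outermost htw_start() drops htw_seq back to 0.
 */
static void example_clear_with_htw_paused(struct mm_struct *mm,
					  unsigned long addr, pte_t *ptep)
{
	htw_stop();			/* htw_seq 0 -> 1, walker disabled */
	pte_clear(mm, addr, ptep);	/* nested stop/start, stays disabled */
	htw_start();			/* htw_seq 1 -> 0, walker re-enabled */
}
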
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index f1df4cb..b5dcbee 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -54,9 +54,7 @@
 #define TASK_SIZE	0x7fff8000UL
 #endif
 
-#ifdef __KERNEL__
 #define STACK_TOP_MAX	TASK_SIZE
-#endif
 
 #define TASK_IS_32BIT_ADDR 1
 
@@ -73,11 +71,7 @@
 #define TASK_SIZE32	0x7fff8000UL
 #define TASK_SIZE64	0x10000000000UL
 #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
-
-#ifdef __KERNEL__
 #define STACK_TOP_MAX	TASK_SIZE64
-#endif
-
 
 #define TASK_SIZE_OF(tsk)						\
 	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
@@ -211,6 +205,8 @@
 	unsigned long	cop2_gfm_poly;
 	/* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
 	unsigned long	cop2_gfm_result[2];
+	/* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
+	unsigned long	cop2_sha3[2];
 };
 #define COP2_INIT						\
 	.cp2			= {0,},
@@ -399,4 +395,15 @@
 
 #endif
 
+/*
+ * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
+ * to the prctl syscall.
+ */
+extern int mips_get_process_fp_mode(struct task_struct *task);
+extern int mips_set_process_fp_mode(struct task_struct *task,
+				    unsigned int value);
+
+#define GET_FP_MODE(task)		mips_get_process_fp_mode(task)
+#define SET_FP_MODE(task,value)		mips_set_process_fp_mode(task, value)
+
 #endif /* _ASM_PROCESSOR_H */
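
mips_get_process_fp_mode() and mips_set_process_fp_mode() are the back ends for the PR_GET_FP_MODE and PR_SET_FP_MODE prctl options, which let a process query or switch its FPU register model (FR=0, FR=1, optionally FRE) at run time. A hedged user-space sketch; the PR_* fallback values below are assumptions for the case where the installed libc headers predate these options:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_FP_MODE
# define PR_SET_FP_MODE		45
# define PR_GET_FP_MODE		46
# define PR_FP_MODE_FR		(1 << 0)	/* 64-bit FP registers */
# define PR_FP_MODE_FRE		(1 << 1)	/* 32-bit compatibility mode */
#endif

int main(void)
{
	int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

	if (mode < 0) {
		perror("PR_GET_FP_MODE");
		return 1;
	}
	printf("FR=%d FRE=%d\n",
	       !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));
	return 0;
}
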
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index eaa2627..8ebc2aa 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -24,13 +24,6 @@
 extern void __dt_setup_arch(void *bph);
 extern int __dt_register_buses(const char *bus0, const char *bus1);
 
-#define dt_setup_arch(sym)						\
-({									\
-	extern char __dtb_##sym##_begin[];				\
-									\
-	__dt_setup_arch(__dtb_##sym##_begin);				\
-})
-
 #else /* CONFIG_OF */
 static inline void device_tree_init(void) { }
 #endif /* CONFIG_OF */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index fc783f8..ffc3203 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -40,8 +40,8 @@
 	unsigned long cp0_cause;
 	unsigned long cp0_epc;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
-	unsigned long long mpl[3];	  /* MTM{0,1,2} */
-	unsigned long long mtp[3];	  /* MTP{0,1,2} */
+	unsigned long long mpl[6];        /* MTM{0-5} */
+	unsigned long long mtp[6];        /* MTP{0-5} */
 #endif
 } __aligned(8);
 
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index e293a8d..1b22d2d 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -14,6 +14,7 @@
 
 #include <asm/asm.h>
 #include <asm/cacheops.h>
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
@@ -39,7 +40,7 @@
 	__asm__ __volatile__(						\
 	"	.set	push					\n"	\
 	"	.set	noreorder				\n"	\
-	"	.set	arch=r4000				\n"	\
+	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
 	"	cache	%0, %1					\n"	\
 	"	.set	pop					\n"	\
 	:								\
@@ -147,7 +148,7 @@
 	__asm__ __volatile__(					\
 	"	.set	push			\n"		\
 	"	.set	noreorder		\n"		\
-	"	.set	arch=r4000		\n"		\
+	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
 	"1:	cache	%0, (%1)		\n"		\
 	"2:	.set	pop			\n"		\
 	"	.section __ex_table,\"a\"	\n"		\
@@ -218,6 +219,7 @@
 	cache_op(Page_Invalidate_T, addr);
 }
 
+#ifndef CONFIG_CPU_MIPSR6
 #define cache16_unroll32(base,op)					\
 	__asm__ __volatile__(						\
 	"	.set push					\n"	\
@@ -322,6 +324,150 @@
 		: "r" (base),						\
 		  "i" (op));
 
+#else
+/*
+ * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
+ * This means we now need to increment the base register before flushing
+ * further cache lines.
+ */
+#define cache16_unroll32(base,op)				\
+	__asm__ __volatile__(					\
+	"	.set push\n"					\
+	"	.set noreorder\n"				\
+	"	.set mips64r6\n"				\
+	"	.set noat\n"					\
+	"	cache %1, 0x000(%0); cache %1, 0x010(%0)\n"	\
+	"	cache %1, 0x020(%0); cache %1, 0x030(%0)\n"	\
+	"	cache %1, 0x040(%0); cache %1, 0x050(%0)\n"	\
+	"	cache %1, 0x060(%0); cache %1, 0x070(%0)\n"	\
+	"	cache %1, 0x080(%0); cache %1, 0x090(%0)\n"	\
+	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"	\
+	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"	\
+	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"	\
+	"	addiu $1, $0, 0x100			\n"	\
+	"	cache %1, 0x000($1); cache %1, 0x010($1)\n"	\
+	"	cache %1, 0x020($1); cache %1, 0x030($1)\n"	\
+	"	cache %1, 0x040($1); cache %1, 0x050($1)\n"	\
+	"	cache %1, 0x060($1); cache %1, 0x070($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x090($1)\n"	\
+	"	cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"	\
+	"	cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"	\
+	"	cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"	\
+	"	.set pop\n"					\
+		:						\
+		: "r" (base),					\
+		  "i" (op));
+
+#define cache32_unroll32(base,op)				\
+	__asm__ __volatile__(					\
+	"	.set push\n"					\
+	"	.set noreorder\n"				\
+	"	.set mips64r6\n"				\
+	"	.set noat\n"					\
+	"	cache %1, 0x000(%0); cache %1, 0x020(%0)\n"	\
+	"	cache %1, 0x040(%0); cache %1, 0x060(%0)\n"	\
+	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"	\
+	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
+	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
+	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
+	"	addiu $1, $1, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
+	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
+	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
+	"	addiu $1, $1, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
+	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
+	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
+	"	.set pop\n"					\
+		:						\
+		: "r" (base),					\
+		  "i" (op));
+
+#define cache64_unroll32(base,op)				\
+	__asm__ __volatile__(					\
+	"	.set push\n"					\
+	"	.set noreorder\n"				\
+	"	.set mips64r6\n"				\
+	"	.set noat\n"					\
+	"	cache %1, 0x000(%0); cache %1, 0x040(%0)\n"	\
+	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
+	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
+	"	.set pop\n"					\
+		:						\
+		: "r" (base),					\
+		  "i" (op));
+
+#define cache128_unroll32(base,op)				\
+	__asm__ __volatile__(					\
+	"	.set push\n"					\
+	"	.set noreorder\n"				\
+	"	.set mips64r6\n"				\
+	"	.set noat\n"					\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
+	"	addiu $1, %0, 0x100\n"				\
+	"	.set pop\n"					\
+		:						\
+		: "r" (base),					\
+		  "i" (op));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 /*
  * Perform the cache operation specified by op using a user mode virtual
  * address while in kernel mode.
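
For context on why the R6 variants above juggle $1 at all: the unrolled macros are consumed by the blast-style page and index flush helpers later in this header, which step through memory 32 cache lines at a time, and on R6 the cache instruction's much smaller offset field can no longer reach all 32 lines from a single base register. A hedged sketch of such a consumer, with an illustrative name and an assumed 16-byte line size:

/*
 * Hedged sketch: one page is flushed 32 lines per iteration, so each step
 * advances the base by 32 * line size (0x200 here). This mirrors the
 * blast_*_page() helpers generated further down in r4kcache.h.
 */
static inline void example_blast_dcache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	do {
		cache16_unroll32(start, Hit_Writeback_Inv_D);
		start += 16 * 32;	/* next block of 32 lines */
	} while (start < end);
}
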
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index 753275a..195db50 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -11,6 +11,7 @@
 #ifndef _ASM_SGIALIB_H
 #define _ASM_SGIALIB_H
 
+#include <linux/compiler.h>
 #include <asm/sgiarcs.h>
 
 extern struct linux_romvec *romvec;
@@ -70,8 +71,11 @@
 extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
 
 /* Misc. routines. */
-extern VOID ArcReboot(VOID) __attribute__((noreturn));
-extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn));
+extern VOID ArcHalt(VOID) __noreturn;
+extern VOID ArcPowerDown(VOID) __noreturn;
+extern VOID ArcRestart(VOID) __noreturn;
+extern VOID ArcReboot(VOID) __noreturn;
+extern VOID ArcEnterInteractiveMode(VOID) __noreturn;
 extern VOID ArcFlushAllCaches(VOID);
 extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID);
 
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h
deleted file mode 100644
index dd9a762..0000000
--- a/arch/mips/include/asm/siginfo.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle
- * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
- */
-#ifndef _ASM_SIGINFO_H
-#define _ASM_SIGINFO_H
-
-#include <uapi/asm/siginfo.h>
-
-
-/*
- * Duplicated here because of <asm-generic/siginfo.h> braindamage ...
- */
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
-	if (from->si_code < 0)
-		memcpy(to, from, sizeof(*to));
-	else
-		/* _sigchld is currently the largest know union member */
-		memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld));
-}
-
-#endif /* _ASM_SIGINFO_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index c6d06d3..b454869 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -89,7 +89,7 @@
 		"	 subu	%[ticket], %[ticket], 1			\n"
 		"	.previous					\n"
 		"	.set pop					\n"
-		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
 		  [serving_now_ptr] "+m" (lock->h.serving_now),
 		  [ticket] "=&r" (tmp),
 		  [my_ticket] "=&r" (my_ticket)
@@ -122,7 +122,7 @@
 		"	 subu	%[ticket], %[ticket], 1			\n"
 		"	.previous					\n"
 		"	.set pop					\n"
-		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
 		  [serving_now_ptr] "+m" (lock->h.serving_now),
 		  [ticket] "=&r" (tmp),
 		  [my_ticket] "=&r" (my_ticket)
@@ -164,7 +164,7 @@
 		"	 li	%[ticket], 0				\n"
 		"	.previous					\n"
 		"	.set pop					\n"
-		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
 		  [ticket] "=&r" (tmp),
 		  [my_ticket] "=&r" (tmp2),
 		  [now_serving] "=&r" (tmp3)
@@ -188,7 +188,7 @@
 		"	 li	%[ticket], 0				\n"
 		"	.previous					\n"
 		"	.set pop					\n"
-		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
 		  [ticket] "=&r" (tmp),
 		  [my_ticket] "=&r" (tmp2),
 		  [now_serving] "=&r" (tmp3)
@@ -235,8 +235,8 @@
 		"	beqzl	%1, 1b					\n"
 		"	 nop						\n"
 		"	.set	reorder					\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 	} else {
 		do {
@@ -245,8 +245,8 @@
 			"	bltz	%1, 1b				\n"
 			"	 addu	%1, 1				\n"
 			"2:	sc	%1, %0				\n"
-			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-			: GCC_OFF12_ASM() (rw->lock)
+			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+			: GCC_OFF_SMALL_ASM() (rw->lock)
 			: "memory");
 		} while (unlikely(!tmp));
 	}
@@ -254,9 +254,6 @@
 	smp_llsc_mb();
 }
 
-/* Note the use of sub, not subu which will make the kernel die with an
-   overflow exception if we ever try to unlock an rwlock that is already
-   unlocked or is being held by a writer.  */
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
@@ -266,20 +263,20 @@
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
 		"1:	ll	%1, %2		# arch_read_unlock	\n"
-		"	sub	%1, 1					\n"
+		"	addiu	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 	} else {
 		do {
 			__asm__ __volatile__(
 			"1:	ll	%1, %2	# arch_read_unlock	\n"
-			"	sub	%1, 1				\n"
+			"	addiu	%1, -1				\n"
 			"	sc	%1, %0				\n"
-			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-			: GCC_OFF12_ASM() (rw->lock)
+			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+			: GCC_OFF_SMALL_ASM() (rw->lock)
 			: "memory");
 		} while (unlikely(!tmp));
 	}
@@ -299,8 +296,8 @@
 		"	beqzl	%1, 1b					\n"
 		"	 nop						\n"
 		"	.set	reorder					\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 	} else {
 		do {
@@ -309,8 +306,8 @@
 			"	bnez	%1, 1b				\n"
 			"	 lui	%1, 0x8000			\n"
 			"2:	sc	%1, %0				\n"
-			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-			: GCC_OFF12_ASM() (rw->lock)
+			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+			: GCC_OFF_SMALL_ASM() (rw->lock)
 			: "memory");
 		} while (unlikely(!tmp));
 	}
@@ -349,8 +346,8 @@
 		__WEAK_LLSC_MB
 		"	li	%2, 1					\n"
 		"2:							\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 	} else {
 		__asm__ __volatile__(
@@ -366,8 +363,8 @@
 		__WEAK_LLSC_MB
 		"	li	%2, 1					\n"
 		"2:							\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 	}
 
@@ -393,8 +390,8 @@
 		"	li	%2, 1					\n"
 		"	.set	reorder					\n"
 		"2:							\n"
-		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: GCC_OFF12_ASM() (rw->lock)
+		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+		: GCC_OFF_SMALL_ASM() (rw->lock)
 		: "memory");
 	} else {
 		do {
@@ -406,9 +403,9 @@
 			"	sc	%1, %0				\n"
 			"	li	%2, 1				\n"
 			"2:						\n"
-			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
+			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
 			  "=&r" (ret)
-			: GCC_OFF12_ASM() (rw->lock)
+			: GCC_OFF_SMALL_ASM() (rw->lock)
 			: "memory");
 		} while (unlikely(!tmp));
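
The GCC_OFF12_ASM() to GCC_OFF_SMALL_ASM() rename throughout this file reflects that the safe offset range for ll/sc memory operands is no longer a fixed 12 bits but whatever the target ISA can encode (R6 shrank it). A hedged sketch of how the macro is expected to be defined in asm/compiler.h; the exact constraint letters shown here are an assumption, not quoted from this patch:

/*
 * Hedged sketch (assumed shape of the asm/compiler.h definition): choose a
 * memory constraint whose displacement range matches what ll/sc accept on
 * the target ISA, so GCC never emits an offset the instruction cannot encode.
 */
#ifndef GCC_OFF_SMALL_ASM
# ifdef CONFIG_CPU_MIPSR6
#  define GCC_OFF_SMALL_ASM() "ZC"	/* R6: small signed offsets only */
# else
#  define GCC_OFF_SMALL_ASM() "R"	/* pre-R6: larger offset range */
# endif
#endif
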
 
diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h
index 0b89006..0f90d88 100644
--- a/arch/mips/include/asm/spram.h
+++ b/arch/mips/include/asm/spram.h
@@ -1,10 +1,10 @@
 #ifndef _MIPS_SPRAM_H
 #define _MIPS_SPRAM_H
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_MIPS_SPRAM)
 extern __init void spram_config(void);
 #else
 static inline void spram_config(void) { };
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_MIPS_SPRAM */
 
 #endif /* _MIPS_SPRAM_H */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index b188c797..28d6d93 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -40,7 +40,7 @@
 		LONG_S	v1, PT_HI(sp)
 		mflhxu	v1
 		LONG_S	v1, PT_ACX(sp)
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
 		mfhi	v1
 #endif
 #ifdef CONFIG_32BIT
@@ -50,7 +50,7 @@
 		LONG_S	$10, PT_R10(sp)
 		LONG_S	$11, PT_R11(sp)
 		LONG_S	$12, PT_R12(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
 		LONG_S	v1, PT_HI(sp)
 		mflo	v1
 #endif
@@ -58,7 +58,7 @@
 		LONG_S	$14, PT_R14(sp)
 		LONG_S	$15, PT_R15(sp)
 		LONG_S	$24, PT_R24(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
 		LONG_S	v1, PT_LO(sp)
 #endif
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
@@ -226,7 +226,7 @@
 		mtlhx	$24
 		LONG_L	$24, PT_LO(sp)
 		mtlhx	$24
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
 		LONG_L	$24, PT_LO(sp)
 		mtlo	$24
 		LONG_L	$24, PT_HI(sp)
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index b928b6f..e92d6c4b 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -75,9 +75,12 @@
 #endif
 
 #define __clear_software_ll_bit()					\
-do {									\
-	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
-		ll_bit = 0;						\
+do {	if (cpu_has_rw_llb) {						\
+		write_c0_lladdr(0);					\
+	} else {							\
+		if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\
+			ll_bit = 0;					\
+	}								\
 } while (0)
 
 #define switch_to(prev, next, last)					\
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 9e1295f..55ed660 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -28,7 +28,7 @@
 	unsigned long		tp_value;	/* thread pointer */
 	__u32			cpu;		/* current CPU */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
-
+	int			r2_emul_return; /* 1 => Returning from R2 emulator */
 	mm_segment_t		addr_limit;	/*
 						 * thread address space limit:
 						 * 0x7fffffff for user-thread
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 89c2243..fc0cf5a 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -21,20 +21,20 @@
 enum major_op {
 	spec_op, bcond_op, j_op, jal_op,
 	beq_op, bne_op, blez_op, bgtz_op,
-	addi_op, addiu_op, slti_op, sltiu_op,
+	addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
 	andi_op, ori_op, xori_op, lui_op,
 	cop0_op, cop1_op, cop2_op, cop1x_op,
 	beql_op, bnel_op, blezl_op, bgtzl_op,
-	daddi_op, daddiu_op, ldl_op, ldr_op,
+	daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
 	spec2_op, jalx_op, mdmx_op, spec3_op,
 	lb_op, lh_op, lwl_op, lw_op,
 	lbu_op, lhu_op, lwr_op, lwu_op,
 	sb_op, sh_op, swl_op, sw_op,
 	sdl_op, sdr_op, swr_op, cache_op,
-	ll_op, lwc1_op, lwc2_op, pref_op,
-	lld_op, ldc1_op, ldc2_op, ld_op,
-	sc_op, swc1_op, swc2_op, major_3b_op,
-	scd_op, sdc1_op, sdc2_op, sd_op
+	ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
+	lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
+	sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
+	scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
 };
 
 /*
@@ -83,9 +83,12 @@
 	swe_op    = 0x1f, bshfl_op  = 0x20,
 	swle_op   = 0x21, swre_op   = 0x22,
 	prefe_op  = 0x23, dbshfl_op = 0x24,
-	lbue_op   = 0x28, lhue_op   = 0x29,
-	lbe_op    = 0x2c, lhe_op    = 0x2d,
-	lle_op    = 0x2e, lwe_op    = 0x2f,
+	cache6_op = 0x25, sc6_op    = 0x26,
+	scd6_op   = 0x27, lbue_op   = 0x28,
+	lhue_op   = 0x29, lbe_op    = 0x2c,
+	lhe_op    = 0x2d, lle_op    = 0x2e,
+	lwe_op    = 0x2f, pref6_op  = 0x35,
+	ll6_op    = 0x36, lld6_op   = 0x37,
 	rdhwr_op  = 0x3b
 };
 
@@ -112,7 +115,8 @@
 	mfhc_op       = 0x03, mtc_op	    = 0x04,
 	dmtc_op	      = 0x05, ctc_op	    = 0x06,
 	mthc0_op      = 0x06, mthc_op	    = 0x07,
-	bc_op	      = 0x08, cop_op	    = 0x10,
+	bc_op	      = 0x08, bc1eqz_op     = 0x09,
+	bc1nez_op     = 0x0d, cop_op	    = 0x10,
 	copm_op	      = 0x18
 };
 
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index d08f83f..2cb7fde 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -16,13 +16,6 @@
 #define HAVE_ARCH_SIGINFO_T
 
 /*
- * We duplicate the generic versions - <asm-generic/siginfo.h> is just borked
- * by design ...
- */
-#define HAVE_ARCH_COPY_SIGINFO
-struct siginfo;
-
-/*
  * Careful to keep union _sifields from shifting ...
  */
 #if _MIPS_SZLONG == 32
@@ -35,8 +28,9 @@
 
 #define __ARCH_SIGSYS
 
-#include <asm-generic/siginfo.h>
+#include <uapi/asm-generic/siginfo.h>
 
+/* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
 typedef struct siginfo {
 	int si_signo;
 	int si_code;
@@ -124,5 +118,6 @@
 #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */
 #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */
 
+#include <asm-generic/siginfo.h>
 
 #endif /* _UAPI_ASM_SIGINFO_H */
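
The reshuffled includes above work around the fact that MIPS cannot reuse the generic siginfo_t: its historical layout keeps si_code before si_errno, the opposite of the generic header. A small sketch of the difference the comment refers to (illustrative struct names; only the leading members are shown):

/* Generic layout (asm-generic/siginfo.h): signo, errno, code. */
struct example_generic_siginfo_head {
	int si_signo;
	int si_errno;
	int si_code;
};

/* MIPS layout, as defined above: signo, code, errno. */
struct example_mips_siginfo_head {
	int si_signo;
	int si_code;	/* swapped ... */
	int si_errno;	/* ... relative to the generic layout */
};
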
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 92987d1..d3d2ff2 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -52,7 +52,7 @@
 obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
 obj-$(CONFIG_MIPS_CPS)		+= smp-cps.o cps-vec.o
 obj-$(CONFIG_MIPS_GIC_IPI)	+= smp-gic.o
-obj-$(CONFIG_CPU_MIPSR2)	+= spram.o
+obj-$(CONFIG_MIPS_SPRAM)	+= spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
 obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
@@ -90,6 +90,7 @@
 obj-$(CONFIG_EARLY_PRINTK_8250)	+= early_printk_8250.o
 obj-$(CONFIG_SPINLOCK_TEST)	+= spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)	+= mips_machine.o
+obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR)	+= mips-r2-to-r6-emul.o
 
 CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 3b2dfdb..750d67a 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -97,6 +97,7 @@
 	OFFSET(TI_TP_VALUE, thread_info, tp_value);
 	OFFSET(TI_CPU, thread_info, cpu);
 	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+	OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return);
 	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
 	OFFSET(TI_REGS, thread_info, regs);
 	DEFINE(_THREAD_SIZE, THREAD_SIZE);
@@ -381,6 +382,7 @@
 	OFFSET(OCTEON_CP2_GFM_RESULT,	octeon_cop2_state, cop2_gfm_result);
 	OFFSET(OCTEON_CP2_HSH_DATW,	octeon_cop2_state, cop2_hsh_datw);
 	OFFSET(OCTEON_CP2_HSH_IVW,	octeon_cop2_state, cop2_hsh_ivw);
+	OFFSET(OCTEON_CP2_SHA3,		octeon_cop2_state, cop2_sha3);
 	OFFSET(THREAD_CP2,	task_struct, thread.cp2);
 	OFFSET(THREAD_CVMSEG,	task_struct, thread.cvmseg.cvmseg);
 	BLANK();
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d7d99d..c2e0f45 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -16,6 +16,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
@@ -399,11 +400,21 @@
  * @returns:	-EFAULT on error and forces SIGBUS, and on success
  *		returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
  *		evaluating the branch.
+ *
+ * MIPS R6 Compact branches and forbidden slots:
+ *	Compact branches do not throw exceptions because they do
+ *	not have delay slots. The forbidden slot instruction ($PC+4)
+ *	is only executed if the branch was not taken. Otherwise the
+ *	forbidden slot is skipped entirely. This means that the
+ *	only possible reason to be here because of a MIPS R6 compact
+ *	branch instruction is that the forbidden slot has thrown one.
+ *	In that case the branch was not taken, so the EPC can be safely
+ *	set to EPC + 8.
  */
 int __compute_return_epc_for_insn(struct pt_regs *regs,
 				   union mips_instruction insn)
 {
-	unsigned int bit, fcr31, dspcontrol;
+	unsigned int bit, fcr31, dspcontrol, reg;
 	long epc = regs->cp0_epc;
 	int ret = 0;
 
@@ -417,6 +428,8 @@
 			regs->regs[insn.r_format.rd] = epc + 8;
 			/* Fall through */
 		case jr_op:
+			if (NO_R6EMU && insn.r_format.func == jr_op)
+				goto sigill_r6;
 			regs->cp0_epc = regs->regs[insn.r_format.rs];
 			break;
 		}
@@ -429,8 +442,10 @@
 	 */
 	case bcond_op:
 		switch (insn.i_format.rt) {
-		case bltz_op:
 		case bltzl_op:
+			if (NO_R6EMU)
+				goto sigill_r6;
+		case bltz_op:
 			if ((long)regs->regs[insn.i_format.rs] < 0) {
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				if (insn.i_format.rt == bltzl_op)
@@ -440,8 +455,10 @@
 			regs->cp0_epc = epc;
 			break;
 
-		case bgez_op:
 		case bgezl_op:
+			if (NO_R6EMU)
+				goto sigill_r6;
+		case bgez_op:
 			if ((long)regs->regs[insn.i_format.rs] >= 0) {
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				if (insn.i_format.rt == bgezl_op)
@@ -453,7 +470,29 @@
 
 		case bltzal_op:
 		case bltzall_op:
+			if (NO_R6EMU && (insn.i_format.rs ||
+			    insn.i_format.rt == bltzall_op)) {
+				ret = -SIGILL;
+				break;
+			}
 			regs->regs[31] = epc + 8;
+			/*
+			 * OK we are here either because we hit a NAL
+			 * instruction or because we are emulating an
+			 * old bltzal{,l} one. Let's figure out what the
+			 * case really is.
+			 */
+			if (!insn.i_format.rs) {
+				/*
+				 * NAL or BLTZAL with rs == 0
+				 * Doesn't matter if we are R6 or not. The
+				 * result is the same
+				 */
+				regs->cp0_epc += 4 +
+					(insn.i_format.simmediate << 2);
+				break;
+			}
+			/* Now do the real thing for non-R6 BLTZAL{,L} */
 			if ((long)regs->regs[insn.i_format.rs] < 0) {
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				if (insn.i_format.rt == bltzall_op)
@@ -465,7 +504,29 @@
 
 		case bgezal_op:
 		case bgezall_op:
+			if (NO_R6EMU && (insn.i_format.rs ||
+			    insn.i_format.rt == bgezall_op)) {
+				ret = -SIGILL;
+				break;
+			}
 			regs->regs[31] = epc + 8;
+			/*
+			 * OK we are here either because we hit a BAL
+			 * instruction or because we are emulating an
+			 * old bgezal{,l} one. Let's figure out what the
+			 * case really is.
+			 */
+			if (!insn.i_format.rs) {
+				/*
+				 * BAL or BGEZAL with rs == 0
+				 * Doesn't matter if we are R6 or not. The
+				 * result is the same
+				 */
+				regs->cp0_epc += 4 +
+					(insn.i_format.simmediate << 2);
+				break;
+			}
+			/* Now do the real thing for non-R6 BGEZAL{,L} */
 			if ((long)regs->regs[insn.i_format.rs] >= 0) {
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
 				if (insn.i_format.rt == bgezall_op)
@@ -477,7 +538,7 @@
 
 		case bposge32_op:
 			if (!cpu_has_dsp)
-				goto sigill;
+				goto sigill_dsp;
 
 			dspcontrol = rddsp(0x01);
 
@@ -508,8 +569,10 @@
 	/*
 	 * These are conditional and in i_format.
 	 */
-	case beq_op:
 	case beql_op:
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case beq_op:
 		if (regs->regs[insn.i_format.rs] ==
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -520,8 +583,10 @@
 		regs->cp0_epc = epc;
 		break;
 
-	case bne_op:
 	case bnel_op:
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case bne_op:
 		if (regs->regs[insn.i_format.rs] !=
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -532,8 +597,31 @@
 		regs->cp0_epc = epc;
 		break;
 
-	case blez_op: /* not really i_format */
-	case blezl_op:
+	case blezl_op: /* not really i_format */
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case blez_op:
+		/*
+		 * Compact branches for R6 for the
+		 * blez and blezl opcodes.
+		 * BLEZ  | rs = 0 | rt != 0  == BLEZALC
+		 * BLEZ  | rs = rt != 0      == BGEZALC
+		 * BLEZ  | rs != 0 | rt != 0 == BGEUC
+		 * BLEZL | rs = 0 | rt != 0  == BLEZC
+		 * BLEZL | rs = rt != 0      == BGEZC
+		 * BLEZL | rs != 0 | rt != 0 == BGEC
+		 *
+		 * For real BLEZ{,L}, rt is always 0.
+		 */
+
+		if (cpu_has_mips_r6 && insn.i_format.rt) {
+			if ((insn.i_format.opcode == blez_op) &&
+			    ((!insn.i_format.rs && insn.i_format.rt) ||
+			     (insn.i_format.rs == insn.i_format.rt)))
+				regs->regs[31] = epc + 4;
+			regs->cp0_epc += 8;
+			break;
+		}
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] <= 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -544,8 +632,32 @@
 		regs->cp0_epc = epc;
 		break;
 
-	case bgtz_op:
 	case bgtzl_op:
+		if (NO_R6EMU)
+			goto sigill_r6;
+	case bgtz_op:
+		/*
+		 * Compact branches for R6 for the
+		 * bgtz and bgtzl opcodes.
+		 * BGTZ  | rs = 0 | rt != 0  == BGTZALC
+		 * BGTZ  | rs = rt != 0      == BLTZALC
+		 * BGTZ  | rs != 0 | rt != 0 == BLTUC
+		 * BGTZL | rs = 0 | rt != 0  == BGTZC
+		 * BGTZL | rs = rt != 0      == BLTZC
+		 * BGTZL | rs != 0 | rt != 0 == BLTC
+		 *
+		 * *ZALC variant for BGTZ && rt != 0
+		 * For real BGTZ{,L}, rt is always 0.
+		 */
+		if (cpu_has_mips_r6 && insn.i_format.rt) {
+			if ((insn.i_format.opcode == bgtz_op) &&
+			    ((!insn.i_format.rs && insn.i_format.rt) ||
+			    (insn.i_format.rs == insn.i_format.rt)))
+				regs->regs[31] = epc + 4;
+			regs->cp0_epc += 8;
+			break;
+		}
+
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] > 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -560,40 +672,83 @@
 	 * And now the FPA/cp1 branch instructions.
 	 */
 	case cop1_op:
-		preempt_disable();
-		if (is_fpu_owner())
-		        fcr31 = read_32bit_cp1_register(CP1_STATUS);
-		else
-			fcr31 = current->thread.fpu.fcr31;
-		preempt_enable();
-
-		bit = (insn.i_format.rt >> 2);
-		bit += (bit != 0);
-		bit += 23;
-		switch (insn.i_format.rt & 3) {
-		case 0: /* bc1f */
-		case 2: /* bc1fl */
-			if (~fcr31 & (1 << bit)) {
-				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == 2)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
+		if (cpu_has_mips_r6 &&
+		    ((insn.i_format.rs == bc1eqz_op) ||
+		     (insn.i_format.rs == bc1nez_op))) {
+			if (!used_math()) { /* First time FPU user */
+				ret = init_fpu();
+				if (ret && NO_R6EMU) {
+					ret = -ret;
+					break;
+				}
+				ret = 0;
+				set_used_math();
+			}
+			lose_fpu(1);    /* Save FPU state for the emulator. */
+			reg = insn.i_format.rt;
+			bit = 0;
+			switch (insn.i_format.rs) {
+			case bc1eqz_op:
+				/* Test bit 0 */
+				if (get_fpr32(&current->thread.fpu.fpr[reg], 0)
+				    & 0x1)
+					bit = 1;
+				break;
+			case bc1nez_op:
+				/* Test bit 0 */
+				if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0)
+				      & 0x1))
+					bit = 1;
+				break;
+			}
+			own_fpu(1);
+			if (bit)
+				epc = epc + 4 +
+					(insn.i_format.simmediate << 2);
+			else
 				epc += 8;
 			regs->cp0_epc = epc;
+
 			break;
+		} else {
 
-		case 1: /* bc1t */
-		case 3: /* bc1tl */
-			if (fcr31 & (1 << bit)) {
-				epc = epc + 4 + (insn.i_format.simmediate << 2);
-				if (insn.i_format.rt == 3)
-					ret = BRANCH_LIKELY_TAKEN;
-			} else
-				epc += 8;
-			regs->cp0_epc = epc;
+			preempt_disable();
+			if (is_fpu_owner())
+			        fcr31 = read_32bit_cp1_register(CP1_STATUS);
+			else
+				fcr31 = current->thread.fpu.fcr31;
+			preempt_enable();
+
+			bit = (insn.i_format.rt >> 2);
+			bit += (bit != 0);
+			bit += 23;
+			switch (insn.i_format.rt & 3) {
+			case 0: /* bc1f */
+			case 2: /* bc1fl */
+				if (~fcr31 & (1 << bit)) {
+					epc = epc + 4 +
+						(insn.i_format.simmediate << 2);
+					if (insn.i_format.rt == 2)
+						ret = BRANCH_LIKELY_TAKEN;
+				} else
+					epc += 8;
+				regs->cp0_epc = epc;
+				break;
+
+			case 1: /* bc1t */
+			case 3: /* bc1tl */
+				if (fcr31 & (1 << bit)) {
+					epc = epc + 4 +
+						(insn.i_format.simmediate << 2);
+					if (insn.i_format.rt == 3)
+						ret = BRANCH_LIKELY_TAKEN;
+				} else
+					epc += 8;
+				regs->cp0_epc = epc;
+				break;
+			}
 			break;
 		}
-		break;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	case lwc2_op: /* This is bbit0 on Octeon */
 		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
@@ -626,15 +781,72 @@
 			epc += 8;
 		regs->cp0_epc = epc;
 		break;
+#else
+	case bc6_op:
+		/* Only valid for MIPS R6 */
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		regs->cp0_epc += 8;
+		break;
+	case balc6_op:
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/* Compact branch: BALC */
+		regs->regs[31] = epc + 4;
+		epc += 4 + (insn.i_format.simmediate << 2);
+		regs->cp0_epc = epc;
+		break;
+	case beqzcjic_op:
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/* Compact branch: BEQZC || JIC */
+		regs->cp0_epc += 8;
+		break;
+	case bnezcjialc_op:
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/* Compact branch: BNEZC || JIALC */
+		if (insn.i_format.rs)
+			regs->regs[31] = epc + 4;
+		regs->cp0_epc += 8;
+		break;
 #endif
+	case cbcond0_op:
+	case cbcond1_op:
+		/* Only valid for MIPS R6 */
+		if (!cpu_has_mips_r6) {
+			ret = -SIGILL;
+			break;
+		}
+		/*
+		 * Compact branches:
+		 * bovc, beqc, beqzalc, bnvc, bnec, bnezalc
+		 */
+		if (insn.i_format.rt && !insn.i_format.rs)
+			regs->regs[31] = epc + 4;
+		regs->cp0_epc += 8;
+		break;
 	}
 
 	return ret;
 
-sigill:
+sigill_dsp:
 	printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
 	force_sig(SIGBUS, current);
 	return -EFAULT;
+sigill_r6:
+	pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
+		current->comm);
+	force_sig(SIGILL, current);
+	return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
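
The BLEZ/BGTZ decode tables in the comments above boil down to a simple rule: with those major opcodes, rt == 0 still means the classic branch, rt != 0 selects one of the R6 compact forms, and only the non-likely opcode with rs == 0 or rs == rt is a linking variant. A hedged sketch of that classification, with illustrative helper names (the in-kernel check is the inline condition in the switch above):

/* Hedged sketch of the BLEZ/BGTZ compact-branch classification. */
static int example_is_compact_branch(union mips_instruction insn)
{
	/* Valid for blez_op, blezl_op, bgtz_op and bgtzl_op on an R6 CPU. */
	return insn.i_format.rt != 0;
}

static int example_compact_branch_links(union mips_instruction insn)
{
	if (!example_is_compact_branch(insn))
		return 0;
	/* Only BLEZALC/BGEZALC and BGTZALC/BLTZALC write $31. */
	if (insn.i_format.opcode != blez_op && insn.i_format.opcode != bgtz_op)
		return 0;
	return insn.i_format.rs == 0 || insn.i_format.rs == insn.i_format.rt;
}
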
 
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 6acaad0..82bd2b2 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -11,7 +11,6 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
-#include <linux/irqchip/mips-gic.h>
 
 #include <asm/time.h>
 #include <asm/cevt-r4k.h>
@@ -40,7 +39,7 @@
 
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
-	const int r2 = cpu_has_mips_r2;
+	const int r2 = cpu_has_mips_r2_r6;
 	struct clock_event_device *cd;
 	int cpu = smp_processor_id();
 
@@ -85,10 +84,7 @@
  */
 static int c0_compare_int_pending(void)
 {
-#ifdef CONFIG_MIPS_GIC
-	if (gic_present)
-		return gic_get_timer_pending();
-#endif
+	/* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
 	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
 }
 
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 0384b05..55b759a 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -99,11 +99,11 @@
 	xori	t2, t1, 0x7
 	beqz	t2, 1f
 	 li	t3, 32
-	addi	t1, t1, 1
+	addiu	t1, t1, 1
 	sllv	t1, t3, t1
 1:	/* At this point t1 == I-cache sets per way */
 	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
-	addi	t2, t2, 1
+	addiu	t2, t2, 1
 	mul	t1, t1, t0
 	mul	t1, t1, t2
 
@@ -126,11 +126,11 @@
 	xori	t2, t1, 0x7
 	beqz	t2, 1f
 	 li	t3, 32
-	addi	t1, t1, 1
+	addiu	t1, t1, 1
 	sllv	t1, t3, t1
 1:	/* At this point t1 == D-cache sets per way */
 	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
-	addi	t2, t2, 1
+	addiu	t2, t2, 1
 	mul	t1, t1, t0
 	mul	t1, t1, t2
 
@@ -250,7 +250,7 @@
 	mfc0	t0, CP0_MVPCONF0
 	srl	t0, t0, MVPCONF0_PVPE_SHIFT
 	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
-	addi	t7, t0, 1
+	addiu	t7, t0, 1
 
 	/* If there's only 1, we're done */
 	beqz	t0, 2f
@@ -280,7 +280,7 @@
 	mttc0	t0, CP0_TCHALT
 
 	/* Next VPE */
-	addi	t5, t5, 1
+	addiu	t5, t5, 1
 	slt	t0, t5, t7
 	bnez	t0, 1b
 	 nop
@@ -317,7 +317,7 @@
 	mfc0	t1, CP0_MVPCONF0
 	srl	t1, t1, MVPCONF0_PVPE_SHIFT
 	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
-	addi	t1, t1, 1
+	addiu	t1, t1, 1
 
 	/* Calculate a mask for the VPE ID from EBase.CPUNum */
 	clz	t1, t1
@@ -424,7 +424,7 @@
 
 	/* Next VPE */
 2:	srl	t6, t6, 1
-	addi	t5, t5, 1
+	addiu	t5, t5, 1
 	bnez	t6, 1b
 	 nop
 
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 2d80b5f..09f4034 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -244,7 +244,7 @@
 	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
 }
 
-int daddiu_bug	= -1;
+int daddiu_bug	= config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
 
 static inline void check_daddiu(void)
 {
@@ -314,11 +314,14 @@
 
 void __init check_bugs64_early(void)
 {
-	check_mult_sh();
-	check_daddiu();
+	if (!config_enabled(CONFIG_CPU_MIPSR6)) {
+		check_mult_sh();
+		check_daddiu();
+	}
 }
 
 void __init check_bugs64(void)
 {
-	check_daddi();
+	if (!config_enabled(CONFIG_CPU_MIPSR6))
+		check_daddi();
 }
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5342674..48dfb9d 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -237,6 +237,13 @@
 		c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
 		break;
 
+	/* R6 incompatible with everything else */
+	case MIPS_CPU_ISA_M64R6:
+		c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
+	case MIPS_CPU_ISA_M32R6:
+		c->isa_level |= MIPS_CPU_ISA_M32R6;
+		/* Break here so we don't add incompatible ISAs */
+		break;
 	case MIPS_CPU_ISA_M32R2:
 		c->isa_level |= MIPS_CPU_ISA_M32R2;
 	case MIPS_CPU_ISA_M32R1:
@@ -326,6 +333,9 @@
 		case 1:
 			set_isa(c, MIPS_CPU_ISA_M32R2);
 			break;
+		case 2:
+			set_isa(c, MIPS_CPU_ISA_M32R6);
+			break;
 		default:
 			goto unknown;
 		}
@@ -338,6 +348,9 @@
 		case 1:
 			set_isa(c, MIPS_CPU_ISA_M64R2);
 			break;
+		case 2:
+			set_isa(c, MIPS_CPU_ISA_M64R6);
+			break;
 		default:
 			goto unknown;
 		}
@@ -424,8 +437,10 @@
 	if (config3 & MIPS_CONF3_MSA)
 		c->ases |= MIPS_ASE_MSA;
 	/* Only tested on 32-bit cores */
-	if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT))
+	if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) {
+		c->htw_seq = 0;
 		c->options |= MIPS_CPU_HTW;
+	}
 
 	return config3 & MIPS_CONF_M;
 }
@@ -499,6 +514,8 @@
 		c->options |= MIPS_CPU_EVA;
 	if (config5 & MIPS_CONF5_MRP)
 		c->options |= MIPS_CPU_MAAR;
+	if (config5 & MIPS_CONF5_LLB)
+		c->options |= MIPS_CPU_RW_LLB;
 
 	return config5 & MIPS_CONF_M;
 }
@@ -533,7 +550,7 @@
 
 	if (cpu_has_rixi) {
 		/* Enable the RIXI exceptions */
-		write_c0_pagegrain(read_c0_pagegrain() | PG_IEC);
+		set_c0_pagegrain(PG_IEC);
 		back_to_back_c0_hazard();
 		/* Verify the IEC bit is set */
 		if (read_c0_pagegrain() & PG_IEC)
@@ -541,7 +558,7 @@
 	}
 
 #ifndef CONFIG_MIPS_CPS
-	if (cpu_has_mips_r2) {
+	if (cpu_has_mips_r2_r6) {
 		c->core = get_ebase_cpunum();
 		if (cpu_has_mipsmt)
 			c->core >>= fls(core_nvpes()) - 1;
@@ -896,6 +913,11 @@
 {
 	c->writecombine = _CACHE_UNCACHED_ACCELERATED;
 	switch (c->processor_id & PRID_IMP_MASK) {
+	case PRID_IMP_QEMU_GENERIC:
+		c->writecombine = _CACHE_UNCACHED;
+		c->cputype = CPU_QEMU_GENERIC;
+		__cpu_name[cpu] = "MIPS GENERIC QEMU";
+		break;
 	case PRID_IMP_4KC:
 		c->cputype = CPU_4KC;
 		c->writecombine = _CACHE_UNCACHED;
@@ -1345,8 +1367,7 @@
 	if (c->options & MIPS_CPU_FPU) {
 		c->fpu_id = cpu_get_fpu_id();
 
-		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+		if (c->isa_level & cpu_has_mips_r) {
 			if (c->fpu_id & MIPS_FPIR_3D)
 				c->ases |= MIPS_ASE_MIPS3D;
 			if (c->fpu_id & MIPS_FPIR_FREP)
@@ -1354,7 +1375,7 @@
 		}
 	}
 
-	if (cpu_has_mips_r2) {
+	if (cpu_has_mips_r2_r6) {
 		c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
 		/* R2 has Performance Counter Interrupt indicator */
 		c->options |= MIPS_CPU_PCI;
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index a5b5b56..d2c09f6 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -11,29 +11,112 @@
 #include <linux/elf.h>
 #include <linux/sched.h>
 
+/* FPU modes */
 enum {
-	FP_ERROR = -1,
-	FP_DOUBLE_64A = -2,
+	FP_FRE,
+	FP_FR0,
+	FP_FR1,
 };
 
+/**
+ * struct mode_req - ABI FPU mode requirements
+ * @single:	The program being loaded needs an FPU but it will only issue
+ *		single precision instructions meaning that it can execute in
+ *		either FR0 or FR1.
+ * @soft:	The soft(-float) requirement means that the program being
+ *		loaded has no FPU dependency at all (i.e. it has no
+ *		FPU instructions).
+ * @fr1:	The program being loaded depends on FPU being in FR=1 mode.
+ * @frdefault:	The program being loaded depends on the default FPU mode.
+ *		That is FR0 for O32 and FR1 for N32/N64.
+ * @fre:	The program being loaded depends on FPU with FRE=1. This mode is
+ *		a bridge which uses FR=1 whilst still being able to maintain
+ *		full compatibility with pre-existing code using the O32 FP32
+ *		ABI.
+ *
+ * More information about the FP ABIs can be found here:
+ *
+ * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
+ *
+ */
+
+struct mode_req {
+	bool single;
+	bool soft;
+	bool fr1;
+	bool frdefault;
+	bool fre;
+};
+
+static const struct mode_req fpu_reqs[] = {
+	[MIPS_ABI_FP_ANY]    = { true,  true,  true,  true,  true  },
+	[MIPS_ABI_FP_DOUBLE] = { false, false, false, true,  true  },
+	[MIPS_ABI_FP_SINGLE] = { true,  false, false, false, false },
+	[MIPS_ABI_FP_SOFT]   = { false, true,  false, false, false },
+	[MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
+	[MIPS_ABI_FP_XX]     = { false, false, true,  true,  true  },
+	[MIPS_ABI_FP_64]     = { false, false, true,  false, false },
+	[MIPS_ABI_FP_64A]    = { false, false, true,  false, true  }
+};
+
+/*
+ * Mode requirements when .MIPS.abiflags is not present in the ELF.
+ * Not present means that everything is acceptable except FR1.
+ */
+static struct mode_req none_req = { true, true, false, true, true };
+
 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
 		     bool is_interp, struct arch_elf_state *state)
 {
-	struct elf32_hdr *ehdr = _ehdr;
-	struct elf32_phdr *phdr = _phdr;
+	struct elf32_hdr *ehdr32 = _ehdr;
+	struct elf32_phdr *phdr32 = _phdr;
+	struct elf64_phdr *phdr64 = _phdr;
 	struct mips_elf_abiflags_v0 abiflags;
 	int ret;
 
-	if (config_enabled(CONFIG_64BIT) &&
-	    (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
-		return 0;
-	if (phdr->p_type != PT_MIPS_ABIFLAGS)
-		return 0;
-	if (phdr->p_filesz < sizeof(abiflags))
-		return -EINVAL;
+	/* Lets see if this is an O32 ELF */
+	if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
+		/* FR = 1 for N32 */
+		if (ehdr32->e_flags & EF_MIPS_ABI2)
+			state->overall_fp_mode = FP_FR1;
+		else
+			/* Set a good default FPU mode for O32 */
+			state->overall_fp_mode = cpu_has_mips_r6 ?
+				FP_FRE : FP_FR0;
 
-	ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
-			  sizeof(abiflags));
+		if (ehdr32->e_flags & EF_MIPS_FP64) {
+			/*
+			 * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
+			 * later if needed
+			 */
+			if (is_interp)
+				state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
+			else
+				state->fp_abi = MIPS_ABI_FP_OLD_64;
+		}
+		if (phdr32->p_type != PT_MIPS_ABIFLAGS)
+			return 0;
+
+		if (phdr32->p_filesz < sizeof(abiflags))
+			return -EINVAL;
+
+		ret = kernel_read(elf, phdr32->p_offset,
+				  (char *)&abiflags,
+				  sizeof(abiflags));
+	} else {
+		/* FR=1 is really the only option for 64-bit */
+		state->overall_fp_mode = FP_FR1;
+
+		if (phdr64->p_type != PT_MIPS_ABIFLAGS)
+			return 0;
+		if (phdr64->p_filesz < sizeof(abiflags))
+			return -EINVAL;
+
+		ret = kernel_read(elf, phdr64->p_offset,
+				  (char *)&abiflags,
+				  sizeof(abiflags));
+	}
+
 	if (ret < 0)
 		return ret;
 	if (ret != sizeof(abiflags))
@@ -48,35 +131,30 @@
 	return 0;
 }
 
-static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
+static inline unsigned get_fp_abi(int in_abi)
 {
 	/* If the ABI requirement is provided, simply return that */
-	if (in_abi != -1)
+	if (in_abi != MIPS_ABI_FP_UNKNOWN)
 		return in_abi;
 
-	/* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */
-	if (ehdr->e_flags & EF_MIPS_FP64)
-		return MIPS_ABI_FP_64;
-
-	/* Default to MIPS_ABI_FP_DOUBLE */
-	return MIPS_ABI_FP_DOUBLE;
+	/* Unknown ABI */
+	return MIPS_ABI_FP_UNKNOWN;
 }
 
 int arch_check_elf(void *_ehdr, bool has_interpreter,
 		   struct arch_elf_state *state)
 {
 	struct elf32_hdr *ehdr = _ehdr;
-	unsigned fp_abi, interp_fp_abi, abi0, abi1;
+	struct mode_req prog_req, interp_req;
+	int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
 
-	/* Ignore non-O32 binaries */
-	if (config_enabled(CONFIG_64BIT) &&
-	    (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
+	if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
 		return 0;
 
-	fp_abi = get_fp_abi(ehdr, state->fp_abi);
+	fp_abi = get_fp_abi(state->fp_abi);
 
 	if (has_interpreter) {
-		interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi);
+		interp_fp_abi = get_fp_abi(state->interp_fp_abi);
 
 		abi0 = min(fp_abi, interp_fp_abi);
 		abi1 = max(fp_abi, interp_fp_abi);
@@ -84,108 +162,103 @@
 		abi0 = abi1 = fp_abi;
 	}
 
-	state->overall_abi = FP_ERROR;
+	/* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
+	max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
+		   (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
+		MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
 
-	if (abi0 == abi1) {
-		state->overall_abi = abi0;
-	} else if (abi0 == MIPS_ABI_FP_ANY) {
-		state->overall_abi = abi1;
-	} else if (abi0 == MIPS_ABI_FP_DOUBLE) {
-		switch (abi1) {
-		case MIPS_ABI_FP_XX:
-			state->overall_abi = MIPS_ABI_FP_DOUBLE;
-			break;
-
-		case MIPS_ABI_FP_64A:
-			state->overall_abi = FP_DOUBLE_64A;
-			break;
-		}
-	} else if (abi0 == MIPS_ABI_FP_SINGLE ||
-		   abi0 == MIPS_ABI_FP_SOFT) {
-		/* Cannot link with other ABIs */
-	} else if (abi0 == MIPS_ABI_FP_OLD_64) {
-		switch (abi1) {
-		case MIPS_ABI_FP_XX:
-		case MIPS_ABI_FP_64:
-		case MIPS_ABI_FP_64A:
-			state->overall_abi = MIPS_ABI_FP_64;
-			break;
-		}
-	} else if (abi0 == MIPS_ABI_FP_XX ||
-		   abi0 == MIPS_ABI_FP_64 ||
-		   abi0 == MIPS_ABI_FP_64A) {
-		state->overall_abi = MIPS_ABI_FP_64;
-	}
-
-	switch (state->overall_abi) {
-	case MIPS_ABI_FP_64:
-	case MIPS_ABI_FP_64A:
-	case FP_DOUBLE_64A:
-		if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-			return -ELIBBAD;
-		break;
-
-	case FP_ERROR:
+	if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
+	    (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
 		return -ELIBBAD;
-	}
+
+	/* It's time to determine the FPU mode requirements */
+	prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
+	interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];
+
+	/*
+	 * Check whether the program's and interp's ABIs have a matching FPU
+	 * mode requirement.
+	 */
+	prog_req.single = interp_req.single && prog_req.single;
+	prog_req.soft = interp_req.soft && prog_req.soft;
+	prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
+	prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
+	prog_req.fre = interp_req.fre && prog_req.fre;
+
+	/*
+	 * Determine the desired FPU mode
+	 *
+	 * Decision making:
+	 *
+	 * - We want FP_FRE if FRE=1 and both FR=1 and FR=0 are false. This
+	 *   means that the combination of program and interpreter inherently
+	 *   requires the hybrid FP mode.
+	 * - If FR1 and FRDEFAULT are both true, we hit the any-ABI or fpxx
+	 *   case. In any-ABI (or no-ABI) there are no FPU instructions, so we
+	 *   don't care about the mode and simply use the one preferred by the
+	 *   hardware. In the fpxx case the ABI can handle both FR=1 and FR=0,
+	 *   so, again, we pick the mode preferred by the hardware. The same
+	 *   choice is made if the code only uses single-precision instructions
+	 *   (single is true but frdefault is false), since the default ABI FPU
+	 *   mode is then not good enough.
+	 * - We want FP_FR1 if that's the only matching mode and the default one
+	 *   is not good.
+	 * - Return -ELIBBAD if we can't find a matching FPU mode.
+	 */
+	if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
+		state->overall_fp_mode = FP_FRE;
+	else if ((prog_req.fr1 && prog_req.frdefault) ||
+		 (prog_req.single && !prog_req.frdefault))
+		/* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+		state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+					  cpu_has_mips_r2_r6) ?
+					  FP_FR1 : FP_FR0;
+	else if (prog_req.fr1)
+		state->overall_fp_mode = FP_FR1;
+	else  if (!prog_req.fre && !prog_req.frdefault &&
+		  !prog_req.fr1 && !prog_req.single && !prog_req.soft)
+		return -ELIBBAD;
 
 	return 0;
 }
 
+static inline void set_thread_fp_mode(int hybrid, int regs32)
+{
+	if (hybrid)
+		set_thread_flag(TIF_HYBRID_FPREGS);
+	else
+		clear_thread_flag(TIF_HYBRID_FPREGS);
+	if (regs32)
+		set_thread_flag(TIF_32BIT_FPREGS);
+	else
+		clear_thread_flag(TIF_32BIT_FPREGS);
+}
+
 void mips_set_personality_fp(struct arch_elf_state *state)
 {
-	if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) {
-		/*
-		 * Use hybrid FPRs for all code which can correctly execute
-		 * with that mode.
-		 */
-		switch (state->overall_abi) {
-		case MIPS_ABI_FP_DOUBLE:
-		case MIPS_ABI_FP_SINGLE:
-		case MIPS_ABI_FP_SOFT:
-		case MIPS_ABI_FP_XX:
-		case MIPS_ABI_FP_ANY:
-			/* FR=1, FRE=1 */
-			clear_thread_flag(TIF_32BIT_FPREGS);
-			set_thread_flag(TIF_HYBRID_FPREGS);
-			return;
-		}
-	}
+	/*
+	 * This function is only ever called for O32 ELFs so we should
+	 * not be worried about N32/N64 binaries.
+	 */
 
-	switch (state->overall_abi) {
-	case MIPS_ABI_FP_DOUBLE:
-	case MIPS_ABI_FP_SINGLE:
-	case MIPS_ABI_FP_SOFT:
-		/* FR=0 */
-		set_thread_flag(TIF_32BIT_FPREGS);
-		clear_thread_flag(TIF_HYBRID_FPREGS);
+	if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+		return;
+
+	switch (state->overall_fp_mode) {
+	case FP_FRE:
+		set_thread_fp_mode(1, 0);
 		break;
-
-	case FP_DOUBLE_64A:
-		/* FR=1, FRE=1 */
-		clear_thread_flag(TIF_32BIT_FPREGS);
-		set_thread_flag(TIF_HYBRID_FPREGS);
+	case FP_FR0:
+		set_thread_fp_mode(0, 1);
 		break;
-
-	case MIPS_ABI_FP_64:
-	case MIPS_ABI_FP_64A:
-		/* FR=1, FRE=0 */
-		clear_thread_flag(TIF_32BIT_FPREGS);
-		clear_thread_flag(TIF_HYBRID_FPREGS);
+	case FP_FR1:
+		set_thread_fp_mode(0, 0);
 		break;
-
-	case MIPS_ABI_FP_XX:
-	case MIPS_ABI_FP_ANY:
-		if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-			set_thread_flag(TIF_32BIT_FPREGS);
-		else
-			clear_thread_flag(TIF_32BIT_FPREGS);
-
-		clear_thread_flag(TIF_HYBRID_FPREGS);
-		break;
-
 	default:
-	case FP_ERROR:
 		BUG();
 	}
 }
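
To make the requirement-intersection logic above concrete, here is a hedged, simplified sketch with a worked case: an FPXX main program paired with an FP64A interpreter intersects to {fr1, fre} only, so the decision chain skips FP_FRE (fr1 is still set), skips the hardware-preferred branch (frdefault is gone and single is false), and settles on FP_FR1. The helper name and the flattened return convention are illustrative; the real code stores its result in state->overall_fp_mode:

/* Hedged sketch: intersect two ABI requirement sets and pick a mode. */
static int example_pick_fp_mode(struct mode_req prog, struct mode_req interp)
{
	prog.single    = prog.single    && interp.single;
	prog.soft      = prog.soft      && interp.soft;
	prog.fr1       = prog.fr1       && interp.fr1;
	prog.frdefault = prog.frdefault && interp.frdefault;
	prog.fre       = prog.fre       && interp.fre;

	if (prog.fre && !prog.frdefault && !prog.fr1)
		return FP_FRE;
	if ((prog.fr1 && prog.frdefault) || (prog.single && !prog.frdefault))
		return FP_FR0;		/* or FP_FR1, whichever the FPU prefers */
	if (prog.fr1)
		return FP_FR1;		/* e.g. the FPXX + FP64A case above */
	if (!prog.frdefault && !prog.single && !prog.soft)
		return -ELIBBAD;	/* nothing matches */
	return FP_FR0;			/* default mode is acceptable (simplified) */
}
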
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 4353d32..af41ba6 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -46,6 +46,11 @@
 	local_irq_disable		# make sure we dont miss an
 					# interrupt setting need_resched
 					# between sampling and return
+#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
+	lw	k0, TI_R2_EMUL_RET($28)
+	bnez	k0, restore_all_from_r2_emul
+#endif
+
 	LONG_L	a2, TI_FLAGS($28)	# current->work
 	andi	t0, a2, _TIF_WORK_MASK	# (ignoring syscall_trace)
 	bnez	t0, work_pending
@@ -114,6 +119,19 @@
 	RESTORE_SP_AND_RET
 	.set	at
 
+#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
+restore_all_from_r2_emul:			# restore full frame
+	.set	noat
+	sw	zero, TI_R2_EMUL_RET($28)	# reset it
+	RESTORE_TEMP
+	RESTORE_AT
+	RESTORE_STATIC
+	RESTORE_SOME
+	LONG_L	sp, PT_R29(sp)
+	eretnc
+	.set	at
+#endif
+
 work_pending:
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
@@ -158,7 +176,8 @@
 	jal	syscall_trace_leave
 	b	resume_userspace
 
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
+    defined(CONFIG_MIPS_MT)
 
 /*
  * MIPS32R2 Instruction Hazard Barrier - must be called
@@ -171,4 +190,4 @@
 	nop
 	END(mips_ihb)
 
-#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
+#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index a5e26dd..2ebaabe 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -125,7 +125,7 @@
 	nop
 	nop
 #endif
-	.set	arch=r4000
+	.set	MIPS_ISA_ARCH_LEVEL_RAW
 	wait
 	/* end of rollback region (the region size must be power of two) */
 1:
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 0b9082b..368c88b 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -186,6 +186,7 @@
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 	case CPU_M5150:
+	case CPU_QEMU_GENERIC:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
new file mode 100644
index 0000000..64d17e4
--- /dev/null
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -0,0 +1,2378 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *      MIPS R2 user space instruction emulator for MIPS R6
+ *
+ */
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+
+#include <asm/asm.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/local.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_64BIT
+#define ADDIU	"daddiu "
+#define INS	"dins "
+#define EXT	"dext "
+#else
+#define ADDIU	"addiu "
+#define INS	"ins "
+#define EXT	"ext "
+#endif /* CONFIG_64BIT */
+
+#define SB	"sb "
+#define LB	"lb "
+#define LL	"ll "
+#define SC	"sc "
+
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
+DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
+
+extern const unsigned int fpucondbit[8];
+
+#define MIPS_R2_EMUL_TOTAL_PASS	10
+
+int mipsr2_emulation = 0;
+
+static int __init mipsr2emu_enable(char *s)
+{
+	mipsr2_emulation = 1;
+
+	pr_info("MIPS R2-to-R6 Emulator Enabled!");
+
+	return 1;
+}
+__setup("mipsr2emu", mipsr2emu_enable);
+
+/**
+ * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in a delay slot
+ * directly, for performance, instead of going through the traditional (and
+ * rather slow) stack trampoline path.
+ * @regs: Process register set
+ * @ir: Instruction
+ */
+static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
+{
+	switch (MIPSInst_OPCODE(ir)) {
+	case addiu_op:
+		if (MIPSInst_RT(ir))
+			regs->regs[MIPSInst_RT(ir)] =
+				(s32)regs->regs[MIPSInst_RS(ir)] +
+				(s32)MIPSInst_SIMM(ir);
+		return 0;
+	case daddiu_op:
+		if (config_enabled(CONFIG_32BIT))
+			break;
+
+		if (MIPSInst_RT(ir))
+			regs->regs[MIPSInst_RT(ir)] =
+				(s64)regs->regs[MIPSInst_RS(ir)] +
+				(s64)MIPSInst_SIMM(ir);
+		return 0;
+	case lwc1_op:
+	case swc1_op:
+	case cop1_op:
+	case cop1x_op:
+		/* FPU instructions in delay slot */
+		return -SIGFPE;
+	case spec_op:
+		switch (MIPSInst_FUNC(ir)) {
+		case or_op:
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					regs->regs[MIPSInst_RS(ir)] |
+					regs->regs[MIPSInst_RT(ir)];
+			return 0;
+		case sll_op:
+			if (MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
+						MIPSInst_FD(ir));
+			return 0;
+		case srl_op:
+			if (MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
+						MIPSInst_FD(ir));
+			return 0;
+		case addu_op:
+			if (MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)((u32)regs->regs[MIPSInst_RS(ir)] +
+					      (u32)regs->regs[MIPSInst_RT(ir)]);
+			return 0;
+		case subu_op:
+			if (MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s32)((u32)regs->regs[MIPSInst_RS(ir)] -
+					      (u32)regs->regs[MIPSInst_RT(ir)]);
+			return 0;
+		case dsll_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
+						MIPSInst_FD(ir));
+			return 0;
+		case dsrl_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
+						MIPSInst_FD(ir));
+			return 0;
+		case daddu_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(u64)regs->regs[MIPSInst_RS(ir)] +
+					(u64)regs->regs[MIPSInst_RT(ir)];
+			return 0;
+		case dsubu_op:
+			if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+				break;
+
+			if (MIPSInst_RD(ir))
+				regs->regs[MIPSInst_RD(ir)] =
+					(s64)((u64)regs->regs[MIPSInst_RS(ir)] -
+					      (u64)regs->regs[MIPSInst_RT(ir)]);
+			return 0;
+		}
+		break;
+	default:
+		pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
+			 ir, MIPSInst_OPCODE(ir));
+	}
+
+	return SIGILL;
+}
+
+/**
+ * movt_func - Emulate a MOVT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movf_func(struct pt_regs *regs, u32 ir)
+{
+	u32 csr;
+	u32 cond;
+
+	csr = current->thread.fpu.fcr31;
+	cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+	if (((csr & cond) == 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+	MIPS_R2_STATS(movs);
+	return 0;
+}
+
+/**
+ * movt_func - Emulate a MOVT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movt_func(struct pt_regs *regs, u32 ir)
+{
+	u32 csr;
+	u32 cond;
+
+	csr = current->thread.fpu.fcr31;
+	cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
+	if (((csr & cond) != 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
+	MIPS_R2_STATS(movs);
+
+	return 0;
+}
+
+/**
+ * jr_func - Emulate a JR instruction.
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns SIGILL if JR was in a delay slot, SIGEMT if we
+ * can't compute the EPC, SIGSEGV if we can't access the
+ * userland instruction or 0 on success.
+ */
+static int jr_func(struct pt_regs *regs, u32 ir)
+{
+	int err;
+	unsigned long cepc, epc, nepc;
+	u32 nir;
+
+	if (delay_slot(regs))
+		return SIGILL;
+
+	/* EPC after the RI/JR instruction */
+	nepc = regs->cp0_epc;
+	/* Roll back to the reserved R2 JR instruction */
+	regs->cp0_epc -= 4;
+	epc = regs->cp0_epc;
+	err = __compute_return_epc(regs);
+
+	if (err < 0)
+		return SIGEMT;
+
+	/* Computed EPC */
+	cepc = regs->cp0_epc;
+
+	/* Get DS instruction */
+	err = __get_user(nir, (u32 __user *)nepc);
+	if (err)
+		return SIGSEGV;
+
+	MIPS_R2BR_STATS(jrs);
+
+	/* If nir == 0 (NOP), there is nothing else to do */
+	if (nir) {
+		/*
+		 * A negative err means an FPU instruction in the BD-slot,
+		 * zero err means 'BD-slot emulation done'.
+		 * For anything else we go back to trampoline emulation.
+		 */
+		err = mipsr6_emul(regs, nir);
+		if (err > 0) {
+			regs->cp0_epc = nepc;
+			err = mips_dsemul(regs, nir, cepc);
+			if (err == SIGILL)
+				err = SIGEMT;
+			MIPS_R2_STATS(dsemul);
+		}
+	}
+
+	return err;
+}
+
+/**
+ * movz_func - Emulate a MOVZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movz_func(struct pt_regs *regs, u32 ir)
+{
+	if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+	MIPS_R2_STATS(movs);
+
+	return 0;
+}
+
+/**
+ * movn_func - Emulate a MOVN instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movn_func(struct pt_regs *regs, u32 ir)
+{
+	if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+	MIPS_R2_STATS(movs);
+
+	return 0;
+}
+
+/**
+ * mfhi_func - Emulate a MFHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mfhi_func(struct pt_regs *regs, u32 ir)
+{
+	if (MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->hi;
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mthi_func - Emulate a MTHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mthi_func(struct pt_regs *regs, u32 ir)
+{
+	regs->hi = regs->regs[MIPSInst_RS(ir)];
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mflo_func - Emulate a MFLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mflo_func(struct pt_regs *regs, u32 ir)
+{
+	if (MIPSInst_RD(ir))
+		regs->regs[MIPSInst_RD(ir)] = regs->lo;
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mtlo_func - Emulate a MTLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mtlo_func(struct pt_regs *regs, u32 ir)
+{
+	regs->lo = regs->regs[MIPSInst_RS(ir)];
+
+	MIPS_R2_STATS(hilo);
+
+	return 0;
+}
+
+/**
+ * mult_func - Emulate a MULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mult_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
+
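+	/* Low 32 bits of the product go to LO, high 32 bits to HI, both sign-extended */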
+	rs = res;
+	regs->lo = (s64)rs;
+	rt = res >> 32;
+	res = (s64)rt;
+	regs->hi = res;
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * multu_func - Emulate a MULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int multu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (u64)rt * (u64)rs;
+	rt = res;
+	regs->lo = (s64)rt;
+	regs->hi = (s64)(res >> 32);
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * div_func - Emulate a DIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int div_func(struct pt_regs *regs, u32 ir)
+{
+	s32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+
+	regs->lo = (s64)(rs / rt);
+	regs->hi = (s64)(rs % rt);
+
+	MIPS_R2_STATS(divs);
+
+	return 0;
+}
+
+/**
+ * divu_func - Emulate a DIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int divu_func(struct pt_regs *regs, u32 ir)
+{
+	u32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+
+	regs->lo = (s64)(rs / rt);
+	regs->hi = (s64)(rs % rt);
+
+	MIPS_R2_STATS(divs);
+
+	return 0;
+}
+
+/**
+ * dmult_func - Emulate a DMULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmult_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s64 rt, rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = rt * rs;
+
+	regs->lo = res;
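+	/* The R6 DMUH instruction yields the upper 64 bits of the 128-bit product */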
+	__asm__ __volatile__(
+		"dmuh %0, %1, %2\t\n"
+		: "=r"(res)
+		: "r"(rt), "r"(rs));
+
+	regs->hi = res;
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * dmultu_func - Emulate a DMULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmultu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u64 rt, rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = rt * rs;
+
+	regs->lo = res;
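+	/* The R6 DMUHU instruction yields the upper 64 bits of the unsigned product */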
+	__asm__ __volatile__(
+		"dmuhu %0, %1, %2\t\n"
+		: "=r"(res)
+		: "r"(rt), "r"(rs));
+
+	regs->hi = res;
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * ddiv_func - Emulate a DDIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddiv_func(struct pt_regs *regs, u32 ir)
+{
+	s64 rt, rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+
+	regs->lo = rs / rt;
+	regs->hi = rs % rt;
+
+	MIPS_R2_STATS(divs);
+
+	return 0;
+}
+
+/**
+ * ddivu_func - Emulate a DDIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddivu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 rt, rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+
+	regs->lo = rs / rt;
+	regs->hi = rs % rt;
+
+	MIPS_R2_STATS(divs);
+
+	return 0;
+}
+
+/* R6 removed instructions for the SPECIAL opcode */
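+/* Each entry is { mask, opcode pattern, handler }, matched by mipsr2_find_op_func() */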
+static struct r2_decoder_table spec_op_table[] = {
+	{ 0xfc1ff83f, 0x00000008, jr_func },
+	{ 0xfc00ffff, 0x00000018, mult_func },
+	{ 0xfc00ffff, 0x00000019, multu_func },
+	{ 0xfc00ffff, 0x0000001c, dmult_func },
+	{ 0xfc00ffff, 0x0000001d, dmultu_func },
+	{ 0xffff07ff, 0x00000010, mfhi_func },
+	{ 0xfc1fffff, 0x00000011, mthi_func },
+	{ 0xffff07ff, 0x00000012, mflo_func },
+	{ 0xfc1fffff, 0x00000013, mtlo_func },
+	{ 0xfc0307ff, 0x00000001, movf_func },
+	{ 0xfc0307ff, 0x00010001, movt_func },
+	{ 0xfc0007ff, 0x0000000a, movz_func },
+	{ 0xfc0007ff, 0x0000000b, movn_func },
+	{ 0xfc00ffff, 0x0000001a, div_func },
+	{ 0xfc00ffff, 0x0000001b, divu_func },
+	{ 0xfc00ffff, 0x0000001e, ddiv_func },
+	{ 0xfc00ffff, 0x0000001f, ddivu_func },
+	{}
+};
+
+/**
+ * madd_func - Emulate a MADD instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int madd_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
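+	/* Accumulate the product into the current 64-bit HI:LO pair */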
+	rt = regs->hi;
+	rs = regs->lo;
+	res += ((((s64)rt) << 32) | (u32)rs);
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * maddu_func - Emulate a MADDU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int maddu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (u64)rt * (u64)rs;
+	rt = regs->hi;
+	rs = regs->lo;
+	res += ((((s64)rt) << 32) | (u32)rs);
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * msub_func - Emulate a MSUB instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msub_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
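+	/* Subtract the product from the current 64-bit HI:LO pair */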
+	rt = regs->hi;
+	rs = regs->lo;
+	res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * msubu_func - Emulate a MSUBU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msubu_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u32 rt, rs;
+
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (u64)rt * (u64)rs;
+	rt = regs->hi;
+	rs = regs->lo;
+	res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+	rt = res;
+	regs->lo = (s64)rt;
+	rs = res >> 32;
+	regs->hi = (s64)rs;
+
+	MIPS_R2_STATS(dsps);
+
+	return 0;
+}
+
+/**
+ * mul_func - Emulate a MUL instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mul_func(struct pt_regs *regs, u32 ir)
+{
+	s64 res;
+	s32 rt, rs;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+	rt = regs->regs[MIPSInst_RT(ir)];
+	rs = regs->regs[MIPSInst_RS(ir)];
+	res = (s64)rt * (s64)rs;
+
+	rs = res;
+	regs->regs[MIPSInst_RD(ir)] = (s64)rs;
+
+	MIPS_R2_STATS(muls);
+
+	return 0;
+}
+
+/**
+ * clz_func - Emulate a CLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clz_func(struct pt_regs *regs, u32 ir)
+{
+	u32 res;
+	u32 rs;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/**
+ * clo_func - Emulate a CLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+
+static int clo_func(struct pt_regs *regs, u32 ir)
+{
+	u32 res;
+	u32 rs;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/**
+ * dclz_func - Emulate a DCLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclz_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u64 rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/**
+ * dclo_func - Emulate a DCLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclo_func(struct pt_regs *regs, u32 ir)
+{
+	u64 res;
+	u64 rs;
+
+	if (config_enabled(CONFIG_32BIT))
+		return SIGILL;
+
+	if (!MIPSInst_RD(ir))
+		return 0;
+
+	rs = regs->regs[MIPSInst_RS(ir)];
+	__asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
+	regs->regs[MIPSInst_RD(ir)] = res;
+
+	MIPS_R2_STATS(bops);
+
+	return 0;
+}
+
+/* R6 removed instructions for the SPECIAL2 opcode */
+static struct r2_decoder_table spec2_op_table[] = {
+	{ 0xfc00ffff, 0x70000000, madd_func },
+	{ 0xfc00ffff, 0x70000001, maddu_func },
+	{ 0xfc0007ff, 0x70000002, mul_func },
+	{ 0xfc00ffff, 0x70000004, msub_func },
+	{ 0xfc00ffff, 0x70000005, msubu_func },
+	{ 0xfc0007ff, 0x70000020, clz_func },
+	{ 0xfc0007ff, 0x70000021, clo_func },
+	{ 0xfc0007ff, 0x70000024, dclz_func },
+	{ 0xfc0007ff, 0x70000025, dclo_func },
+	{ }
+};
+
+static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
+				      struct r2_decoder_table *table)
+{
+	struct r2_decoder_table *p;
+	int err;
+
+	for (p = table; p->func; p++) {
+		if ((inst & p->mask) == p->code) {
+			err = (p->func)(regs, inst);
+			return err;
+		}
+	}
+	return SIGILL;
+}
+
+/**
+ * mipsr2_decoder - Decode and emulate a MIPS R2 instruction
+ * @regs: Process register set
+ * @inst: Instruction to decode and emulate
+ */
+int mipsr2_decoder(struct pt_regs *regs, u32 inst)
+{
+	int err = 0;
+	unsigned long vaddr;
+	u32 nir;
+	unsigned long cpc, epc, nepc, r31, res, rs, rt;
+
+	void __user *fault_addr = NULL;
+	int pass = 0;
+
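+	/* Keep emulating consecutive R2 instructions, up to MIPS_R2_EMUL_TOTAL_PASS per trap */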
+repeat:
+	r31 = regs->regs[31];
+	epc = regs->cp0_epc;
+	err = compute_return_epc(regs);
+	if (err < 0) {
+		BUG();
+		return SIGEMT;
+	}
+	pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d)\n",
+		 inst, epc, pass);
+
+	switch (MIPSInst_OPCODE(inst)) {
+	case spec_op:
+		err = mipsr2_find_op_func(regs, inst, spec_op_table);
+		if (err < 0) {
+			/* FPU instruction under JR */
+			regs->cp0_cause |= CAUSEF_BD;
+			goto fpu_emul;
+		}
+		break;
+	case spec2_op:
+		err = mipsr2_find_op_func(regs, inst, spec2_op_table);
+		break;
+	case bcond_op:
+		rt = MIPSInst_RT(inst);
+		rs = MIPSInst_RS(inst);
+		switch (rt) {
+		case tgei_op:
+			if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
+				do_trap_or_bp(regs, 0, "TGEI");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case tgeiu_op:
+			if (regs->regs[rs] >= MIPSInst_UIMM(inst))
+				do_trap_or_bp(regs, 0, "TGEIU");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case tlti_op:
+			if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
+				do_trap_or_bp(regs, 0, "TLTI");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case tltiu_op:
+			if (regs->regs[rs] < MIPSInst_UIMM(inst))
+				do_trap_or_bp(regs, 0, "TLTIU");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case teqi_op:
+			if (regs->regs[rs] == MIPSInst_SIMM(inst))
+				do_trap_or_bp(regs, 0, "TEQI");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case tnei_op:
+			if (regs->regs[rs] != MIPSInst_SIMM(inst))
+				do_trap_or_bp(regs, 0, "TNEI");
+
+			MIPS_R2_STATS(traps);
+
+			break;
+		case bltzl_op:
+		case bgezl_op:
+		case bltzall_op:
+		case bgezall_op:
+			if (delay_slot(regs)) {
+				err = SIGILL;
+				break;
+			}
+			regs->regs[31] = r31;
+			regs->cp0_epc = epc;
+			err = __compute_return_epc(regs);
+			if (err < 0)
+				return SIGEMT;
+			if (err != BRANCH_LIKELY_TAKEN)
+				break;
+			cpc = regs->cp0_epc;
+			nepc = epc + 4;
+			err = __get_user(nir, (u32 __user *)nepc);
+			if (err) {
+				err = SIGSEGV;
+				break;
+			}
+			/*
+			 * This will probably be optimized away when
+			 * CONFIG_DEBUG_FS is not enabled
+			 */
+			switch (rt) {
+			case bltzl_op:
+				MIPS_R2BR_STATS(bltzl);
+				break;
+			case bgezl_op:
+				MIPS_R2BR_STATS(bgezl);
+				break;
+			case bltzall_op:
+				MIPS_R2BR_STATS(bltzall);
+				break;
+			case bgezall_op:
+				MIPS_R2BR_STATS(bgezall);
+				break;
+			}
+
+			switch (MIPSInst_OPCODE(nir)) {
+			case cop1_op:
+			case cop1x_op:
+			case lwc1_op:
+			case swc1_op:
+				regs->cp0_cause |= CAUSEF_BD;
+				goto fpu_emul;
+			}
+			if (nir) {
+				err = mipsr6_emul(regs, nir);
+				if (err > 0) {
+					err = mips_dsemul(regs, nir, cpc);
+					if (err == SIGILL)
+						err = SIGEMT;
+					MIPS_R2_STATS(dsemul);
+				}
+			}
+			break;
+		case bltzal_op:
+		case bgezal_op:
+			if (delay_slot(regs)) {
+				err = SIGILL;
+				break;
+			}
+			regs->regs[31] = r31;
+			regs->cp0_epc = epc;
+			err = __compute_return_epc(regs);
+			if (err < 0)
+				return SIGEMT;
+			cpc = regs->cp0_epc;
+			nepc = epc + 4;
+			err = __get_user(nir, (u32 __user *)nepc);
+			if (err) {
+				err = SIGSEGV;
+				break;
+			}
+			/*
+			 * This will probably be optimized away when
+			 * CONFIG_DEBUG_FS is not enabled
+			 */
+			switch (rt) {
+			case bltzal_op:
+				MIPS_R2BR_STATS(bltzal);
+				break;
+			case bgezal_op:
+				MIPS_R2BR_STATS(bgezal);
+				break;
+			}
+
+			switch (MIPSInst_OPCODE(nir)) {
+			case cop1_op:
+			case cop1x_op:
+			case lwc1_op:
+			case swc1_op:
+				regs->cp0_cause |= CAUSEF_BD;
+				goto fpu_emul;
+			}
+			if (nir) {
+				err = mipsr6_emul(regs, nir);
+				if (err > 0) {
+					err = mips_dsemul(regs, nir, cpc);
+					if (err == SIGILL)
+						err = SIGEMT;
+					MIPS_R2_STATS(dsemul);
+				}
+			}
+			break;
+		default:
+			regs->regs[31] = r31;
+			regs->cp0_epc = epc;
+			err = SIGILL;
+			break;
+		}
+		break;
+
+	case beql_op:
+	case bnel_op:
+	case blezl_op:
+	case bgtzl_op:
+		if (delay_slot(regs)) {
+			err = SIGILL;
+			break;
+		}
+		regs->regs[31] = r31;
+		regs->cp0_epc = epc;
+		err = __compute_return_epc(regs);
+		if (err < 0)
+			return SIGEMT;
+		if (err != BRANCH_LIKELY_TAKEN)
+			break;
+		cpc = regs->cp0_epc;
+		nepc = epc + 4;
+		err = __get_user(nir, (u32 __user *)nepc);
+		if (err) {
+			err = SIGSEGV;
+			break;
+		}
+		/*
+		 * This will probably be optimized away when
+		 * CONFIG_DEBUG_FS is not enabled
+		 */
+		switch (MIPSInst_OPCODE(inst)) {
+		case beql_op:
+			MIPS_R2BR_STATS(beql);
+			break;
+		case bnel_op:
+			MIPS_R2BR_STATS(bnel);
+			break;
+		case blezl_op:
+			MIPS_R2BR_STATS(blezl);
+			break;
+		case bgtzl_op:
+			MIPS_R2BR_STATS(bgtzl);
+			break;
+		}
+
+		switch (MIPSInst_OPCODE(nir)) {
+		case cop1_op:
+		case cop1x_op:
+		case lwc1_op:
+		case swc1_op:
+			regs->cp0_cause |= CAUSEF_BD;
+			goto fpu_emul;
+		}
+		if (nir) {
+			err = mipsr6_emul(regs, nir);
+			if (err > 0) {
+				err = mips_dsemul(regs, nir, cpc);
+				if (err == SIGILL)
+					err = SIGEMT;
+				MIPS_R2_STATS(dsemul);
+			}
+		}
+		break;
+	case lwc1_op:
+	case swc1_op:
+	case cop1_op:
+	case cop1x_op:
+fpu_emul:
+		regs->regs[31] = r31;
+		regs->cp0_epc = epc;
+		if (!used_math()) {     /* First time FPU user.  */
+			err = init_fpu();
+			set_used_math();
+		}
+		lose_fpu(1);    /* Save FPU state for the emulator. */
+
+		err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+					       &fault_addr);
+
+		/*
+		 * This is a tricky issue - lose_fpu() uses LL/SC atomics
+		 * if the FPU is owned and effectively cancels user-level LL/SC.
+		 * So it might seem logical not to restore FPU ownership here.
+		 * But a sequence of several FPU instructions is far more common
+		 * than an LL-FPU-SC pattern, so we prefer to loop here until
+		 * the next scheduler cycle cancels FPU ownership.
+		 */
+		own_fpu(1);	/* Restore FPU state. */
+
+		if (err)
+			current->thread.cp0_baduaddr = (unsigned long)fault_addr;
+
+		MIPS_R2_STATS(fpus);
+
+		break;
+
+	case lwl_op:
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
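+		/* Emulate LWL a byte at a time: LB each byte and merge it into rt with INS */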
+		__asm__ __volatile__(
+			"	.set	push\n"
+			"	.set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"1:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 24, 8\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"2:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 16, 8\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"3:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 8, 8\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"4:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"1:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 24, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+			"2:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 16, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+			"3:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 8, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+			"4:"	LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:	sll	%0, %0, 0\n"
+			"10:\n"
+			"	.insn\n"
+			"	.section	.fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	10b\n"
+			"	.previous\n"
+			"	.section	__ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV));
+
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = rt;
+
+		MIPS_R2_STATS(loads);
+
+		break;
+
+	case lwr_op:
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"       .set	push\n"
+			"       .set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"1:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 0, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+			"2:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 8, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+			"3:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 16, 8\n"
+				ADDIU	"%2, %2, 1\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+			"4:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 24, 8\n"
+			"       sll	%0, %0, 0\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"1:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 0, 8\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"2:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 8, 8\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"3:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 16, 8\n"
+			"       andi	%1, %2, 0x3\n"
+			"       beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+			"4:"    LB	"%1, 0(%2)\n"
+				INS	"%0, %1, 24, 8\n"
+			"       sll	%0, %0, 0\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"10:\n"
+			"	.insn\n"
+			"	.section	.fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	10b\n"
+			"       .previous\n"
+			"	.section	__ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV));
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = rt;
+
+		MIPS_R2_STATS(loads);
+
+		break;
+
+	case swl_op:
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
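+		/* Emulate SWL a byte at a time: EXT each byte from rt and store it with SB */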
+		__asm__ __volatile__(
+			"	.set	push\n"
+			"	.set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+				EXT	"%1, %0, 24, 8\n"
+			"1:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 16, 8\n"
+			"2:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 8, 8\n"
+			"3:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 0, 8\n"
+			"4:"	SB	"%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+				EXT	"%1, %0, 24, 8\n"
+			"1:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 16, 8\n"
+			"2:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 8, 8\n"
+			"3:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 0, 8\n"
+			"4:"	SB	"%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"	.insn\n"
+			"       .section        .fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	9b\n"
+			"	.previous\n"
+			"	.section        __ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV)
+			: "memory");
+
+		MIPS_R2_STATS(stores);
+
+		break;
+
+	case swr_op:
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"	.set	push\n"
+			"	.set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+				EXT	"%1, %0, 0, 8\n"
+			"1:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 8, 8\n"
+			"2:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 16, 8\n"
+			"3:"	SB	"%1, 0(%2)\n"
+				ADDIU	"%2, %2, 1\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				EXT	"%1, %0, 24, 8\n"
+			"4:"	SB	"%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+				EXT	"%1, %0, 0, 8\n"
+			"1:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 8, 8\n"
+			"2:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 16, 8\n"
+			"3:"	SB	"%1, 0(%2)\n"
+			"	andi	%1, %2, 0x3\n"
+			"	beq	$0, %1, 9f\n"
+				ADDIU	"%2, %2, -1\n"
+				EXT	"%1, %0, 24, 8\n"
+			"4:"	SB	"%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"	.insn\n"
+			"	.section        .fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	9b\n"
+			"	.previous\n"
+			"	.section        __ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV)
+			: "memory");
+
+		MIPS_R2_STATS(stores);
+
+		break;
+
+	case ldl_op:
+		if (config_enabled(CONFIG_32BIT)) {
+		    err = SIGILL;
+		    break;
+		}
+
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"	.set    push\n"
+			"	.set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"1:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 56, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"2:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 48, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"3:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 40, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"4:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 32, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"5:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 24, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"6:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 16, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"7:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 8, 8\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"0:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"1:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 56, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"2:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 48, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"3:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 40, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"4:	lb	%1, 0(%2)\n"
+			"	dinsu	%0, %1, 32, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"5:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 24, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"6:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 16, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"7:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 8, 8\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"0:	lb	%1, 0(%2)\n"
+			"	dins	%0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"	.insn\n"
+			"	.section        .fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	9b\n"
+			"	.previous\n"
+			"	.section        __ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.word	5b,8b\n"
+			"	.word	6b,8b\n"
+			"	.word	7b,8b\n"
+			"	.word	0b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV));
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = rt;
+
+		MIPS_R2_STATS(loads);
+		break;
+
+	case ldr_op:
+		if (config_enabled(CONFIG_32BIT)) {
+		    err = SIGILL;
+		    break;
+		}
+
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"	.set    push\n"
+			"	.set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"1:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 0, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"2:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 8, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"3:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 16, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"4:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 24, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"5:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 32, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"6:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 40, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"7:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 48, 8\n"
+			"	daddiu  %2, %2, 1\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"0:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 56, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"1:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 0, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"2:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 8, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"3:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 16, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"4:	lb      %1, 0(%2)\n"
+			"	dins   %0, %1, 24, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"5:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 32, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"6:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 40, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"7:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 48, 8\n"
+			"	andi    %1, %2, 0x7\n"
+			"	beq     $0, %1, 9f\n"
+			"	daddiu  %2, %2, -1\n"
+			"0:	lb      %1, 0(%2)\n"
+			"	dinsu    %0, %1, 56, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"	.insn\n"
+			"	.section        .fixup,\"ax\"\n"
+			"8:	li     %3,%4\n"
+			"	j      9b\n"
+			"	.previous\n"
+			"	.section        __ex_table,\"a\"\n"
+			"	.word  1b,8b\n"
+			"	.word  2b,8b\n"
+			"	.word  3b,8b\n"
+			"	.word  4b,8b\n"
+			"	.word  5b,8b\n"
+			"	.word  6b,8b\n"
+			"	.word  7b,8b\n"
+			"	.word  0b,8b\n"
+			"	.previous\n"
+			"	.set    pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV));
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = rt;
+
+		MIPS_R2_STATS(loads);
+		break;
+
+	case sdl_op:
+		if (config_enabled(CONFIG_32BIT)) {
+		    err = SIGILL;
+		    break;
+		}
+
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"	.set	push\n"
+			"	.set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"	dextu	%1, %0, 56, 8\n"
+			"1:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dextu	%1, %0, 48, 8\n"
+			"2:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dextu	%1, %0, 40, 8\n"
+			"3:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dextu	%1, %0, 32, 8\n"
+			"4:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dext	%1, %0, 24, 8\n"
+			"5:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dext	%1, %0, 16, 8\n"
+			"6:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dext	%1, %0, 8, 8\n"
+			"7:	sb	%1, 0(%2)\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	daddiu	%2, %2, -1\n"
+			"	dext	%1, %0, 0, 8\n"
+			"0:	sb	%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"	dextu	%1, %0, 56, 8\n"
+			"1:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dextu	%1, %0, 48, 8\n"
+			"2:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dextu	%1, %0, 40, 8\n"
+			"3:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dextu	%1, %0, 32, 8\n"
+			"4:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dext	%1, %0, 24, 8\n"
+			"5:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dext	%1, %0, 16, 8\n"
+			"6:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dext	%1, %0, 8, 8\n"
+			"7:	sb	%1, 0(%2)\n"
+			"	daddiu	%2, %2, 1\n"
+			"	andi	%1, %2, 0x7\n"
+			"	beq	$0, %1, 9f\n"
+			"	dext	%1, %0, 0, 8\n"
+			"0:	sb	%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"	.insn\n"
+			"	.section        .fixup,\"ax\"\n"
+			"8:	li	%3,%4\n"
+			"	j	9b\n"
+			"	.previous\n"
+			"	.section        __ex_table,\"a\"\n"
+			"	.word	1b,8b\n"
+			"	.word	2b,8b\n"
+			"	.word	3b,8b\n"
+			"	.word	4b,8b\n"
+			"	.word	5b,8b\n"
+			"	.word	6b,8b\n"
+			"	.word	7b,8b\n"
+			"	.word	0b,8b\n"
+			"	.previous\n"
+			"	.set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV)
+			: "memory");
+
+		MIPS_R2_STATS(stores);
+		break;
+
+	case sdr_op:
+		if (config_enabled(CONFIG_32BIT)) {
+		    err = SIGILL;
+		    break;
+		}
+
+		rt = regs->regs[MIPSInst_RT(inst)];
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGSEGV;
+			break;
+		}
+		__asm__ __volatile__(
+			"       .set	push\n"
+			"       .set	reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+			"       dext	%1, %0, 0, 8\n"
+			"1:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dext	%1, %0, 8, 8\n"
+			"2:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dext	%1, %0, 16, 8\n"
+			"3:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dext	%1, %0, 24, 8\n"
+			"4:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dextu	%1, %0, 32, 8\n"
+			"5:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dextu	%1, %0, 40, 8\n"
+			"6:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dextu	%1, %0, 48, 8\n"
+			"7:     sb	%1, 0(%2)\n"
+			"       daddiu	%2, %2, 1\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       dextu	%1, %0, 56, 8\n"
+			"0:     sb	%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+			"       dext	%1, %0, 0, 8\n"
+			"1:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dext	%1, %0, 8, 8\n"
+			"2:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dext	%1, %0, 16, 8\n"
+			"3:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dext	%1, %0, 24, 8\n"
+			"4:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dextu	%1, %0, 32, 8\n"
+			"5:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dextu	%1, %0, 40, 8\n"
+			"6:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dextu	%1, %0, 48, 8\n"
+			"7:     sb	%1, 0(%2)\n"
+			"       andi	%1, %2, 0x7\n"
+			"       beq	$0, %1, 9f\n"
+			"       daddiu	%2, %2, -1\n"
+			"       dextu	%1, %0, 56, 8\n"
+			"0:     sb	%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+			"9:\n"
+			"       .insn\n"
+			"       .section        .fixup,\"ax\"\n"
+			"8:     li	%3,%4\n"
+			"       j	9b\n"
+			"       .previous\n"
+			"       .section        __ex_table,\"a\"\n"
+			"       .word	1b,8b\n"
+			"       .word	2b,8b\n"
+			"       .word	3b,8b\n"
+			"       .word	4b,8b\n"
+			"       .word	5b,8b\n"
+			"       .word	6b,8b\n"
+			"       .word	7b,8b\n"
+			"       .word	0b,8b\n"
+			"       .previous\n"
+			"       .set	pop\n"
+			: "+&r"(rt), "=&r"(rs),
+			  "+&r"(vaddr), "+&r"(err)
+			: "i"(SIGSEGV)
+			: "memory");
+
+		MIPS_R2_STATS(stores);
+
+		break;
+	case ll_op:
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (vaddr & 0x3) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+
+		if (!cpu_has_rw_llb) {
+			/*
+			 * An LL/SC block can't be safely emulated without
+			 * Config5/LLB being available. So it's probably time to
+			 * kill our process before things get any worse. This is
+			 * because Config5/LLB allows us to use ERETNC so that
+			 * the LLAddr/LLB bit is not cleared when we return from
+			 * an exception. MIPS R2 LL/SC instructions trap with an
+			 * RI exception so once we emulate them here, we return
+			 * back to userland with ERETNC. That preserves the
+			 * LLAddr/LLB so the subsequent SC instruction will
+			 * succeed preserving the atomic semantics of the LL/SC
+			 * block. Without that, there is no safe way to emulate
+			 * an LL/SC block in MIPSR2 userland.
+			 */
+			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+			err = SIGKILL;
+			break;
+		}
+
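+		/* Execute a native LL so LLAddr/LLB is set for the user's subsequent SC */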
+		__asm__ __volatile__(
+			"1:\n"
+			"ll	%0, 0(%2)\n"
+			"2:\n"
+			".insn\n"
+			".section        .fixup,\"ax\"\n"
+			"3:\n"
+			"li	%1, %3\n"
+			"j	2b\n"
+			".previous\n"
+			".section        __ex_table,\"a\"\n"
+			".word  1b, 3b\n"
+			".previous\n"
+			: "=&r"(res), "+&r"(err)
+			: "r"(vaddr), "i"(SIGSEGV)
+			: "memory");
+
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = res;
+		MIPS_R2_STATS(llsc);
+
+		break;
+
+	case sc_op:
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (vaddr & 0x3) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+
+		if (!cpu_has_rw_llb) {
+			/*
+			 * An LL/SC block can't be safely emulated without
+			 * Config5/LLB being available. So it's probably time to
+			 * kill our process before things get any worse. This is
+			 * because Config5/LLB allows us to use ERETNC so that
+			 * the LLAddr/LLB bit is not cleared when we return from
+			 * an exception. MIPS R2 LL/SC instructions trap with an
+			 * RI exception so once we emulate them here, we return
+			 * back to userland with ERETNC. That preserves the
+			 * LLAddr/LLB so the subsequent SC instruction will
+			 * succeed preserving the atomic semantics of the LL/SC
+			 * block. Without that, there is no safe way to emulate
+			 * an LL/SC block in MIPSR2 userland.
+			 */
+			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+			err = SIGKILL;
+			break;
+		}
+
+		res = regs->regs[MIPSInst_RT(inst)];
+
+		__asm__ __volatile__(
+			"1:\n"
+			"sc	%0, 0(%2)\n"
+			"2:\n"
+			".insn\n"
+			".section        .fixup,\"ax\"\n"
+			"3:\n"
+			"li	%1, %3\n"
+			"j	2b\n"
+			".previous\n"
+			".section        __ex_table,\"a\"\n"
+			".word	1b, 3b\n"
+			".previous\n"
+			: "+&r"(res), "+&r"(err)
+			: "r"(vaddr), "i"(SIGSEGV));
+
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = res;
+
+		MIPS_R2_STATS(llsc);
+
+		break;
+
+	case lld_op:
+		if (config_enabled(CONFIG_32BIT)) {
+		    err = SIGILL;
+		    break;
+		}
+
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (vaddr & 0x7) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+
+		if (!cpu_has_rw_llb) {
+			/*
+			 * An LL/SC block can't be safely emulated without
+			 * Config5/LLB being available. So it's probably time to
+			 * kill our process before things get any worse. This is
+			 * because Config5/LLB allows us to use ERETNC so that
+			 * the LLAddr/LLB bit is not cleared when we return from
+			 * an exception. MIPS R2 LL/SC instructions trap with an
+			 * RI exception so once we emulate them here, we return
+			 * back to userland with ERETNC. That preserves the
+			 * LLAddr/LLB so the subsequent SC instruction will
+			 * succeed preserving the atomic semantics of the LL/SC
+			 * block. Without that, there is no safe way to emulate
+			 * an LL/SC block in MIPSR2 userland.
+			 */
+			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+			err = SIGKILL;
+			break;
+		}
+
+		__asm__ __volatile__(
+			"1:\n"
+			"lld	%0, 0(%2)\n"
+			"2:\n"
+			".insn\n"
+			".section        .fixup,\"ax\"\n"
+			"3:\n"
+			"li	%1, %3\n"
+			"j	2b\n"
+			".previous\n"
+			".section        __ex_table,\"a\"\n"
+			".word  1b, 3b\n"
+			".previous\n"
+			: "=&r"(res), "+&r"(err)
+			: "r"(vaddr), "i"(SIGSEGV)
+			: "memory");
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = res;
+
+		MIPS_R2_STATS(llsc);
+
+		break;
+
+	case scd_op:
+		if (config_enabled(CONFIG_32BIT)) {
+		    err = SIGILL;
+		    break;
+		}
+
+		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+		if (vaddr & 0x7) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+			current->thread.cp0_baduaddr = vaddr;
+			err = SIGBUS;
+			break;
+		}
+
+		if (!cpu_has_rw_llb) {
+			/*
+			 * An LL/SC block can't be safely emulated without
+			 * Config5/LLB being available. So it's probably time to
+			 * kill our process before things get any worse. This is
+			 * because Config5/LLB allows us to use ERETNC so that
+			 * the LLAddr/LLB bit is not cleared when we return from
+			 * an exception. MIPS R2 LL/SC instructions trap with an
+			 * RI exception so once we emulate them here, we return
+			 * back to userland with ERETNC. That preserves the
+			 * LLAddr/LLB so the subsequent SC instruction will
+			 * succeed preserving the atomic semantics of the LL/SC
+			 * block. Without that, there is no safe way to emulate
+			 * an LL/SC block in MIPSR2 userland.
+			 */
+			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+			err = SIGKILL;
+			break;
+		}
+
+		res = regs->regs[MIPSInst_RT(inst)];
+
+		__asm__ __volatile__(
+			"1:\n"
+			"scd	%0, 0(%2)\n"
+			"2:\n"
+			".insn\n"
+			".section        .fixup,\"ax\"\n"
+			"3:\n"
+			"li	%1, %3\n"
+			"j	2b\n"
+			".previous\n"
+			".section        __ex_table,\"a\"\n"
+			".word	1b, 3b\n"
+			".previous\n"
+			: "+&r"(res), "+&r"(err)
+			: "r"(vaddr), "i"(SIGSEGV));
+
+		if (MIPSInst_RT(inst) && !err)
+			regs->regs[MIPSInst_RT(inst)] = res;
+
+		MIPS_R2_STATS(llsc);
+
+		break;
+	case pref_op:
+		/* skip it */
+		break;
+	default:
+		err = SIGILL;
+	}
+
+	/*
+	 * Let's not return to userland just yet. It's costly and
+	 * it's likely that we have more R2 instructions to emulate.
+	 */
+	if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
+		regs->cp0_cause &= ~CAUSEF_BD;
+		err = get_user(inst, (u32 __user *)regs->cp0_epc);
+		if (!err)
+			goto repeat;
+
+		if (err < 0)
+			err = SIGSEGV;
+	}
+
+	if (err && (err != SIGEMT)) {
+		regs->regs[31] = r31;
+		regs->cp0_epc = epc;
+	}
+
+	/* Likely a MIPS R6 compatible instruction */
+	if (pass && (err == SIGILL))
+		err = 0;
+
+	return err;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mipsr2_stats_show(struct seq_file *s, void *unused)
+{
+
+	seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
+	seq_printf(s, "movs\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.movs),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
+	seq_printf(s, "hilo\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
+	seq_printf(s, "muls\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.muls),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
+	seq_printf(s, "divs\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.divs),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
+	seq_printf(s, "dsps\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
+	seq_printf(s, "bops\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.bops),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
+	seq_printf(s, "traps\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.traps),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
+	seq_printf(s, "fpus\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
+	seq_printf(s, "loads\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.loads),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
+	seq_printf(s, "stores\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.stores),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
+	seq_printf(s, "llsc\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
+	seq_printf(s, "dsemul\t\t%ld\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
+		   (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
+	seq_printf(s, "jr\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
+	seq_printf(s, "bltzl\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
+	seq_printf(s, "bgezl\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
+	seq_printf(s, "bltzll\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
+	seq_printf(s, "bgezll\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
+	seq_printf(s, "bltzal\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
+	seq_printf(s, "bgezal\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
+	seq_printf(s, "beql\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
+	seq_printf(s, "bnel\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
+	seq_printf(s, "blezl\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
+	seq_printf(s, "bgtzl\t\t%ld\n",
+		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
+
+	return 0;
+}
+
+static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
+{
+	mipsr2_stats_show(s, unused);
+
+	__this_cpu_write((mipsr2emustats).movs, 0);
+	__this_cpu_write((mipsr2bdemustats).movs, 0);
+	__this_cpu_write((mipsr2emustats).hilo, 0);
+	__this_cpu_write((mipsr2bdemustats).hilo, 0);
+	__this_cpu_write((mipsr2emustats).muls, 0);
+	__this_cpu_write((mipsr2bdemustats).muls, 0);
+	__this_cpu_write((mipsr2emustats).divs, 0);
+	__this_cpu_write((mipsr2bdemustats).divs, 0);
+	__this_cpu_write((mipsr2emustats).dsps, 0);
+	__this_cpu_write((mipsr2bdemustats).dsps, 0);
+	__this_cpu_write((mipsr2emustats).bops, 0);
+	__this_cpu_write((mipsr2bdemustats).bops, 0);
+	__this_cpu_write((mipsr2emustats).traps, 0);
+	__this_cpu_write((mipsr2bdemustats).traps, 0);
+	__this_cpu_write((mipsr2emustats).fpus, 0);
+	__this_cpu_write((mipsr2bdemustats).fpus, 0);
+	__this_cpu_write((mipsr2emustats).loads, 0);
+	__this_cpu_write((mipsr2bdemustats).loads, 0);
+	__this_cpu_write((mipsr2emustats).stores, 0);
+	__this_cpu_write((mipsr2bdemustats).stores, 0);
+	__this_cpu_write((mipsr2emustats).llsc, 0);
+	__this_cpu_write((mipsr2bdemustats).llsc, 0);
+	__this_cpu_write((mipsr2emustats).dsemul, 0);
+	__this_cpu_write((mipsr2bdemustats).dsemul, 0);
+	__this_cpu_write((mipsr2bremustats).jrs, 0);
+	__this_cpu_write((mipsr2bremustats).bltzl, 0);
+	__this_cpu_write((mipsr2bremustats).bgezl, 0);
+	__this_cpu_write((mipsr2bremustats).bltzll, 0);
+	__this_cpu_write((mipsr2bremustats).bgezll, 0);
+	__this_cpu_write((mipsr2bremustats).bltzal, 0);
+	__this_cpu_write((mipsr2bremustats).bgezal, 0);
+	__this_cpu_write((mipsr2bremustats).beql, 0);
+	__this_cpu_write((mipsr2bremustats).bnel, 0);
+	__this_cpu_write((mipsr2bremustats).blezl, 0);
+	__this_cpu_write((mipsr2bremustats).bgtzl, 0);
+
+	return 0;
+}
+
+static int mipsr2_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mipsr2_stats_show, inode->i_private);
+}
+
+static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mipsr2_stats_clear_show, inode->i_private);
+}
+
+static const struct file_operations mipsr2_emul_fops = {
+	.open                   = mipsr2_stats_open,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+static const struct file_operations mipsr2_clear_fops = {
+	.open                   = mipsr2_stats_clear_open,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
+static int __init mipsr2_init_debugfs(void)
+{
+	extern struct dentry	*mips_debugfs_dir;
+	struct dentry		*mipsr2_emul;
+
+	if (!mips_debugfs_dir)
+		return -ENODEV;
+
+	mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
+					  mips_debugfs_dir, NULL,
+					  &mipsr2_emul_fops);
+	if (!mipsr2_emul)
+		return -ENOMEM;
+
+	mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
+					  mips_debugfs_dir, NULL,
+					  &mipsr2_clear_fops);
+	if (!mipsr2_emul)
+		return -ENOMEM;
+
+	return 0;
+}
+
+device_initcall(mipsr2_init_debugfs);
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 17eaf0c..291af0b 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -14,6 +14,8 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/ftrace.h>
+#include <asm/fpu.h>
+#include <asm/msa.h>
 
 extern void *__bzero(void *__s, size_t __count);
 extern long __strncpy_from_kernel_nocheck_asm(char *__to,
@@ -32,6 +34,14 @@
 extern long __strnlen_user_asm(const char *s);
 
 /*
+ * Core architecture code
+ */
+EXPORT_SYMBOL_GPL(_save_fp);
+#ifdef CONFIG_CPU_HAS_MSA
+EXPORT_SYMBOL_GPL(_save_msa);
+#endif
+
+/*
  * String functions
  */
 EXPORT_SYMBOL(memset);
@@ -67,11 +77,13 @@
 EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
+#ifndef CONFIG_CPU_MIPSR6
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_partial_copy_kernel);
 EXPORT_SYMBOL(__csum_partial_copy_to_user);
 EXPORT_SYMBOL(__csum_partial_copy_from_user);
+#endif
 
 EXPORT_SYMBOL(invalid_pte_table);
 #ifdef CONFIG_FUNCTION_TRACER
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index f654768..423ae83 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -31,15 +31,11 @@
 	/*
 	 * check if we need to save FPU registers
 	 */
-	PTR_L	t3, TASK_THREAD_INFO(a0)
-	LONG_L	t0, TI_FLAGS(t3)
-	li	t1, _TIF_USEDFPU
-	and	t2, t0, t1
-	beqz	t2, 1f
-	nor	t1, zero, t1
-
-	and	t0, t0, t1
-	LONG_S	t0, TI_FLAGS(t3)
+	.set push
+	.set noreorder
+	beqz	a3, 1f
+	 PTR_L	t3, TASK_THREAD_INFO(a0)
+	.set pop
 
 	/*
 	 * clear saved user stack CU1 bit
@@ -56,36 +52,9 @@
 	.set pop
 1:
 
-	/* check if we need to save COP2 registers */
-	PTR_L	t2, TASK_THREAD_INFO(a0)
-	LONG_L	t0, ST_OFF(t2)
-	bbit0	t0, 30, 1f
-
-	/* Disable COP2 in the stored process state */
-	li	t1, ST0_CU2
-	xor	t0, t1
-	LONG_S	t0, ST_OFF(t2)
-
-	/* Enable COP2 so we can save it */
-	mfc0	t0, CP0_STATUS
-	or	t0, t1
-	mtc0	t0, CP0_STATUS
-
-	/* Save COP2 */
-	daddu	a0, THREAD_CP2
-	jal octeon_cop2_save
-	dsubu	a0, THREAD_CP2
-
-	/* Disable COP2 now that we are done */
-	mfc0	t0, CP0_STATUS
-	li	t1, ST0_CU2
-	xor	t0, t1
-	mtc0	t0, CP0_STATUS
-
-1:
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 	/* Check if we need to store CVMSEG state */
-	mfc0	t0, $11,7	/* CvmMemCtl */
+	dmfc0	t0, $11,7	/* CvmMemCtl */
 	bbit0	t0, 6, 3f	/* Is user access enabled? */
 
 	/* Store the CVMSEG state */
@@ -109,9 +78,9 @@
 	.set reorder
 
 	/* Disable access to CVMSEG */
-	mfc0	t0, $11,7	/* CvmMemCtl */
+	dmfc0	t0, $11,7	/* CvmMemCtl */
 	xori	t0, t0, 0x40	/* Bit 6 is CVMSEG user enable */
-	mtc0	t0, $11,7	/* CvmMemCtl */
+	dmtc0	t0, $11,7	/* CvmMemCtl */
 #endif
 3:
 
@@ -147,6 +116,8 @@
  * void octeon_cop2_save(struct octeon_cop2_state *a0)
  */
 	.align	7
+	.set push
+	.set noreorder
 	LEAF(octeon_cop2_save)
 
 	dmfc0	t9, $9,7	/* CvmCtl register. */
@@ -157,17 +128,17 @@
 	dmfc2	t2, 0x0200
 	sd	t0, OCTEON_CP2_CRC_IV(a0)
 	sd	t1, OCTEON_CP2_CRC_LENGTH(a0)
-	sd	t2, OCTEON_CP2_CRC_POLY(a0)
 	/* Skip next instructions if CvmCtl[NODFA_CP2] set */
 	bbit1	t9, 28, 1f
+	 sd	t2, OCTEON_CP2_CRC_POLY(a0)
 
 	/* Save the LLM state */
 	dmfc2	t0, 0x0402
 	dmfc2	t1, 0x040A
 	sd	t0, OCTEON_CP2_LLM_DAT(a0)
-	sd	t1, OCTEON_CP2_LLM_DAT+8(a0)
 
 1:	bbit1	t9, 26, 3f	/* done if CvmCtl[NOCRYPTO] set */
+	 sd	t1, OCTEON_CP2_LLM_DAT+8(a0)
 
 	/* Save the COP2 crypto state */
 	/* this part is mostly common to both pass 1 and later revisions */
@@ -198,18 +169,20 @@
 	sd	t2, OCTEON_CP2_AES_KEY+16(a0)
 	dmfc2	t2, 0x0101
 	sd	t3, OCTEON_CP2_AES_KEY+24(a0)
-	mfc0	t3, $15,0	/* Get the processor ID register */
+	mfc0	v0, $15,0	/* Get the processor ID register */
 	sd	t0, OCTEON_CP2_AES_KEYLEN(a0)
-	li	t0, 0x000d0000	/* This is the processor ID of Octeon Pass1 */
+	li	v1, 0x000d0000	/* This is the processor ID of Octeon Pass1 */
 	sd	t1, OCTEON_CP2_AES_RESULT(a0)
-	sd	t2, OCTEON_CP2_AES_RESULT+8(a0)
 	/* Skip to the Pass1 version of the remainder of the COP2 state */
-	beq	t3, t0, 2f
+	beq	v0, v1, 2f
+	 sd	t2, OCTEON_CP2_AES_RESULT+8(a0)
 
 	/* the non-pass1 state when !CvmCtl[NOCRYPTO] */
 	dmfc2	t1, 0x0240
 	dmfc2	t2, 0x0241
+	ori	v1, v1, 0x9500 /* lowest OCTEON III PrId */
 	dmfc2	t3, 0x0242
+	subu	v1, v0, v1 /* prid - lowest OCTEON III PrId */
 	dmfc2	t0, 0x0243
 	sd	t1, OCTEON_CP2_HSH_DATW(a0)
 	dmfc2	t1, 0x0244
@@ -262,8 +235,16 @@
 	sd	t1, OCTEON_CP2_GFM_MULT+8(a0)
 	sd	t2, OCTEON_CP2_GFM_POLY(a0)
 	sd	t3, OCTEON_CP2_GFM_RESULT(a0)
-	sd	t0, OCTEON_CP2_GFM_RESULT+8(a0)
+	bltz	v1, 4f
+	 sd	t0, OCTEON_CP2_GFM_RESULT+8(a0)
+	/* OCTEON III-specific state */
+	dmfc2	t0, 0x024F
+	dmfc2	t1, 0x0050
+	sd	t0, OCTEON_CP2_SHA3(a0)
+	sd	t1, OCTEON_CP2_SHA3+8(a0)
+4:
 	jr	ra
+	 nop
 
 2:	/* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
 	dmfc2	t3, 0x0040
@@ -289,7 +270,9 @@
 
 3:	/* pass 1 or CvmCtl[NOCRYPTO] set */
 	jr	ra
+	 nop
 	END(octeon_cop2_save)
+	.set pop
 
 /*
  * void octeon_cop2_restore(struct octeon_cop2_state *a0)
@@ -354,9 +337,9 @@
 	ld	t2, OCTEON_CP2_AES_RESULT+8(a0)
 	mfc0	t3, $15,0	/* Get the processor ID register */
 	dmtc2	t0, 0x0110
-	li	t0, 0x000d0000	/* This is the processor ID of Octeon Pass1 */
+	li	v0, 0x000d0000	/* This is the processor ID of Octeon Pass1 */
 	dmtc2	t1, 0x0100
-	bne	t0, t3, 3f	/* Skip the next stuff for non-pass1 */
+	bne	v0, t3, 3f	/* Skip the next stuff for non-pass1 */
 	 dmtc2	t2, 0x0101
 
 	/* this code is specific for pass 1 */
@@ -384,6 +367,7 @@
 
 3:	/* this is post-pass1 code */
 	ld	t2, OCTEON_CP2_HSH_DATW(a0)
+	ori	v0, v0, 0x9500 /* lowest OCTEON III PrId */
 	ld	t0, OCTEON_CP2_HSH_DATW+8(a0)
 	ld	t1, OCTEON_CP2_HSH_DATW+16(a0)
 	dmtc2	t2, 0x0240
@@ -437,9 +421,15 @@
 	dmtc2	t2, 0x0259
 	ld	t2, OCTEON_CP2_GFM_RESULT+8(a0)
 	dmtc2	t0, 0x025E
+	subu	v0, t3, v0 /* prid - lowest OCTEON III PrId */
 	dmtc2	t1, 0x025A
-	dmtc2	t2, 0x025B
-
+	bltz	v0, done_restore
+	 dmtc2	t2, 0x025B
+	/* OCTEON III things */
+	ld	t0, OCTEON_CP2_SHA3(a0)
+	ld	t1, OCTEON_CP2_SHA3+8(a0)
+	dmtc2	t0, 0x0051
+	dmtc2	t1, 0x0050
 done_restore:
 	jr	ra
 	 nop
@@ -450,18 +440,23 @@
  * void octeon_mult_save()
  * sp is assumed to point to a struct pt_regs
  *
- * NOTE: This is called in SAVE_SOME in stackframe.h. It can only
- *	 safely modify k0 and k1.
+ * NOTE: This is called in SAVE_TEMP in stackframe.h. It can
+ *	 safely modify v1, k0, k1, $10-$15, and $24.  It will
+ *	 be overwritten with a processor-specific version of the code.
  */
-	.align	7
+	.p2align 7
 	.set push
 	.set noreorder
 	LEAF(octeon_mult_save)
-	dmfc0	k0, $9,7	/* CvmCtl register. */
-	bbit1	k0, 27, 1f	/* Skip CvmCtl[NOMUL] */
+	jr	ra
 	 nop
+	.space 30 * 4, 0
+octeon_mult_save_end:
+	EXPORT(octeon_mult_save_end)
+	END(octeon_mult_save)
 
-	/* Save the multiplier state */
+	LEAF(octeon_mult_save2)
+	/* Save the multiplier state OCTEON II and earlier */
 	v3mulu	k0, $0, $0
 	v3mulu	k1, $0, $0
 	sd	k0, PT_MTP(sp)	      /* PT_MTP	   has P0 */
@@ -476,44 +471,107 @@
 	sd	k0, PT_MPL+8(sp)      /* PT_MPL+8  has MPL1 */
 	jr	ra
 	 sd	k1, PT_MPL+16(sp)     /* PT_MPL+16 has MPL2 */
+octeon_mult_save2_end:
+	EXPORT(octeon_mult_save2_end)
+	END(octeon_mult_save2)
 
-1:	/* Resume here if CvmCtl[NOMUL] */
+	LEAF(octeon_mult_save3)
+	/* Save the multiplier state OCTEON III */
+	v3mulu	$10, $0, $0		/* read P0 */
+	v3mulu	$11, $0, $0		/* read P1 */
+	v3mulu	$12, $0, $0		/* read P2 */
+	sd	$10, PT_MTP+(0*8)(sp)	/* store P0 */
+	v3mulu	$10, $0, $0		/* read P3 */
+	sd	$11, PT_MTP+(1*8)(sp)	/*  store P1 */
+	v3mulu	$11, $0, $0		/* read P4 */
+	sd	$12, PT_MTP+(2*8)(sp)	/* store P2 */
+	ori	$13, $0, 1
+	v3mulu	$12, $0, $0		/* read P5 */
+	sd	$10, PT_MTP+(3*8)(sp)	/* store P3 */
+	v3mulu	$13, $13, $0		/* P4-P0 = MPL5-MPL1, $13 = MPL0 */
+	sd	$11, PT_MTP+(4*8)(sp)	/* store P4 */
+	v3mulu	$10, $0, $0		/* read MPL1 */
+	sd	$12, PT_MTP+(5*8)(sp)	/* store P5 */
+	v3mulu	$11, $0, $0		/* read MPL2 */
+	sd	$13, PT_MPL+(0*8)(sp)	/* store MPL0 */
+	v3mulu	$12, $0, $0		/* read MPL3 */
+	sd	$10, PT_MPL+(1*8)(sp)	/* store MPL1 */
+	v3mulu	$10, $0, $0		/* read MPL4 */
+	sd	$11, PT_MPL+(2*8)(sp)	/* store MPL2 */
+	v3mulu	$11, $0, $0		/* read MPL5 */
+	sd	$12, PT_MPL+(3*8)(sp)	/* store MPL3 */
+	sd	$10, PT_MPL+(4*8)(sp)	/* store MPL4 */
 	jr	ra
-	END(octeon_mult_save)
+	 sd	$11, PT_MPL+(5*8)(sp)	/* store MPL5 */
+octeon_mult_save3_end:
+	EXPORT(octeon_mult_save3_end)
+	END(octeon_mult_save3)
 	.set pop
 
 /*
  * void octeon_mult_restore()
  * sp is assumed to point to a struct pt_regs
  *
- * NOTE: This is called in RESTORE_SOME in stackframe.h.
+ * NOTE: This is called in RESTORE_TEMP in stackframe.h.
  */
-	.align	7
+	.p2align 7
 	.set push
 	.set noreorder
 	LEAF(octeon_mult_restore)
-	dmfc0	k1, $9,7		/* CvmCtl register. */
-	ld	v0, PT_MPL(sp)		/* MPL0 */
-	ld	v1, PT_MPL+8(sp)	/* MPL1 */
-	ld	k0, PT_MPL+16(sp)	/* MPL2 */
-	bbit1	k1, 27, 1f		/* Skip CvmCtl[NOMUL] */
-	/* Normally falls through, so no time wasted here */
-	nop
-
-	/* Restore the multiplier state */
-	ld	k1, PT_MTP+16(sp)	/* P2 */
-	MTM0	v0			/* MPL0 */
-	ld	v0, PT_MTP+8(sp)	/* P1 */
-	MTM1	v1			/* MPL1 */
-	ld	v1, PT_MTP(sp)		/* P0 */
-	MTM2	k0			/* MPL2 */
-	MTP2	k1			/* P2 */
-	MTP1	v0			/* P1 */
-	jr	ra
-	 MTP0	v1			/* P0 */
-
-1:	/* Resume here if CvmCtl[NOMUL] */
 	jr	ra
 	 nop
+	.space 30 * 4, 0
+octeon_mult_restore_end:
+	EXPORT(octeon_mult_restore_end)
 	END(octeon_mult_restore)
+
+	LEAF(octeon_mult_restore2)
+	ld	v0, PT_MPL(sp)        	/* MPL0 */
+	ld	v1, PT_MPL+8(sp)      	/* MPL1 */
+	ld	k0, PT_MPL+16(sp)     	/* MPL2 */
+	/* Restore the multiplier state */
+	ld	k1, PT_MTP+16(sp)     	/* P2 */
+	mtm0	v0			/* MPL0 */
+	ld	v0, PT_MTP+8(sp)	/* P1 */
+	mtm1	v1			/* MPL1 */
+	ld	v1, PT_MTP(sp)   	/* P0 */
+	mtm2	k0			/* MPL2 */
+	mtp2	k1			/* P2 */
+	mtp1	v0			/* P1 */
+	jr	ra
+	 mtp0	v1			/* P0 */
+octeon_mult_restore2_end:
+	EXPORT(octeon_mult_restore2_end)
+	END(octeon_mult_restore2)
+
+	LEAF(octeon_mult_restore3)
+	ld	$12, PT_MPL+(0*8)(sp)	/* read MPL0 */
+	ld	$13, PT_MPL+(3*8)(sp)	/* read MPL3 */
+	ld	$10, PT_MPL+(1*8)(sp)	/* read MPL1 */
+	ld	$11, PT_MPL+(4*8)(sp)	/* read MPL4 */
+	.word	0x718d0008
+	/* mtm0	$12, $13		   restore MPL0 and MPL3 */
+	ld	$12, PT_MPL+(2*8)(sp)	/* read MPL2 */
+	.word	0x714b000c
+	/* mtm1	$10, $11		   restore MPL1 and MPL4 */
+	ld	$13, PT_MPL+(5*8)(sp)	/* read MPL5 */
+	ld	$10, PT_MTP+(0*8)(sp)	/* read P0 */
+	ld	$11, PT_MTP+(3*8)(sp)	/* read P3 */
+	.word	0x718d000d
+	/* mtm2	$12, $13		   restore MPL2 and MPL5 */
+	ld	$12, PT_MTP+(1*8)(sp)	/* read P1 */
+	.word	0x714b0009
+	/* mtp0	$10, $11		   restore P0 and P3 */
+	ld	$13, PT_MTP+(4*8)(sp)	/* read P4 */
+	ld	$10, PT_MTP+(2*8)(sp)	/* read P2 */
+	ld	$11, PT_MTP+(5*8)(sp)	/* read P5 */
+	.word	0x718d000a
+	/* mtp1	$12, $13		   restore P1 and P4 */
+	jr	ra
+	.word	0x714b000b
+	/* mtp2	$10, $11		   restore P2 and P5 */
+
+octeon_mult_restore3_end:
+	EXPORT(octeon_mult_restore3_end)
+	END(octeon_mult_restore3)
 	.set pop
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 097fc8d..130af7d 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -82,7 +82,9 @@
 		seq_printf(m, "]\n");
 	}
 
-	seq_printf(m, "isa\t\t\t: mips1");
+	seq_printf(m, "isa\t\t\t:"); 
+	if (cpu_has_mips_r1)
+		seq_printf(m, " mips1");
 	if (cpu_has_mips_2)
 		seq_printf(m, "%s", " mips2");
 	if (cpu_has_mips_3)
@@ -95,10 +97,14 @@
 		seq_printf(m, "%s", " mips32r1");
 	if (cpu_has_mips32r2)
 		seq_printf(m, "%s", " mips32r2");
+	if (cpu_has_mips32r6)
+		seq_printf(m, "%s", " mips32r6");
 	if (cpu_has_mips64r1)
 		seq_printf(m, "%s", " mips64r1");
 	if (cpu_has_mips64r2)
 		seq_printf(m, "%s", " mips64r2");
+	if (cpu_has_mips64r6)
+		seq_printf(m, "%s", " mips64r6");
 	seq_printf(m, "\n");
 
 	seq_printf(m, "ASEs implemented\t:");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 85bff5d..bf85cc1 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -25,6 +25,7 @@
 #include <linux/completion.h>
 #include <linux/kallsyms.h>
 #include <linux/random.h>
+#include <linux/prctl.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -562,3 +563,98 @@
 {
 	smp_call_function(arch_dump_stack, NULL, 1);
 }
+
+int mips_get_process_fp_mode(struct task_struct *task)
+{
+	int value = 0;
+
+	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
+		value |= PR_FP_MODE_FR;
+	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
+		value |= PR_FP_MODE_FRE;
+
+	return value;
+}
+
+int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+{
+	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+	unsigned long switch_count;
+	struct task_struct *t;
+
+	/* Check the value is valid */
+	if (value & ~known_bits)
+		return -EOPNOTSUPP;
+
+	/* Avoid inadvertently triggering emulation */
+	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
+	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+		return -EOPNOTSUPP;
+	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+		return -EOPNOTSUPP;
+
+	/* FR = 0 not supported in MIPS R6 */
+	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+		return -EOPNOTSUPP;
+
+	/* Save FP & vector context, then disable FPU & MSA */
+	if (task->signal == current->signal)
+		lose_fpu(1);
+
+	/* Prevent any threads from obtaining live FP context */
+	atomic_set(&task->mm->context.fp_mode_switching, 1);
+	smp_mb__after_atomic();
+
+	/*
+	 * If there are multiple online CPUs then wait until all threads whose
+	 * FP mode is about to change have been context switched. This approach
+	 * allows us to only worry about whether an FP mode switch is in
+	 * progress when FP is first used in a task's time slice. Pretty much all
+	 * of the mode switch overhead can thus be confined to cases where mode
+	 * switches are actually occurring. That is, to here. However, for the
+	 * thread performing the mode switch it may take a while...
+	 */
+	if (num_online_cpus() > 1) {
+		spin_lock_irq(&task->sighand->siglock);
+
+		for_each_thread(task, t) {
+			if (t == current)
+				continue;
+
+			switch_count = t->nvcsw + t->nivcsw;
+
+			do {
+				spin_unlock_irq(&task->sighand->siglock);
+				cond_resched();
+				spin_lock_irq(&task->sighand->siglock);
+			} while ((t->nvcsw + t->nivcsw) == switch_count);
+		}
+
+		spin_unlock_irq(&task->sighand->siglock);
+	}
+
+	/*
+	 * There are now no threads of the process with live FP context, so it
+	 * is safe to proceed with the FP mode switch.
+	 */
+	for_each_thread(task, t) {
+		/* Update desired FP register width */
+		if (value & PR_FP_MODE_FR) {
+			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+		} else {
+			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
+		}
+
+		/* Update desired FP single layout */
+		if (value & PR_FP_MODE_FRE)
+			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+		else
+			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+	}
+
+	/* Allow threads to use FP again */
+	atomic_set(&task->mm->context.fp_mode_switching, 0);
+
+	return 0;
+}
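
The two new functions above are the MIPS back end for the PR_SET_FP_MODE and
PR_GET_FP_MODE prctl() operations. A minimal user-space sketch of how a
program could query and request an FP mode, assuming a libc that exposes
prctl() and kernel headers that provide the PR_FP_MODE_* constants:

	/*
	 * Sketch only, not part of the patch: read and set the MIPS FP
	 * mode from user space.  PR_SET_FP_MODE may legitimately fail
	 * with EOPNOTSUPP, as the checks in mips_set_process_fp_mode()
	 * show.
	 */
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

		if (mode < 0) {
			perror("PR_GET_FP_MODE");
			return 1;
		}
		printf("FR=%d FRE=%d\n",
		       !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));

		/* Request 64-bit FP registers (FR=1). */
		if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) < 0)
			perror("PR_SET_FP_MODE");

		return 0;
	}
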
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 6c160c6..676c503 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -34,7 +34,7 @@
 	.endm
 
 	.set	noreorder
-	.set	arch=r4000
+	.set	MIPS_ISA_ARCH_LEVEL_RAW
 
 LEAF(_save_fp_context)
 	.set	push
@@ -42,7 +42,8 @@
 	cfc1	t1, fcr31
 	.set	pop
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	.set	push
 	SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
@@ -105,10 +106,12 @@
 	SET_HARDFLOAT
 	cfc1	t1, fcr31
 
+#ifndef CONFIG_CPU_MIPS64_R6
 	mfc0	t0, CP0_STATUS
 	sll	t0, t0, 5
 	bgez	t0, 1f			# skip storing odd if FR=0
 	 nop
+#endif
 
 	/* Store the 16 odd double precision registers */
 	EX      sdc1 $f1, SC32_FPREGS+8(a0)
@@ -163,7 +166,8 @@
 LEAF(_restore_fp_context)
 	EX	lw t1, SC_FPC_CSR(a0)
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)  || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	.set	push
 	SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
@@ -223,10 +227,12 @@
 	SET_HARDFLOAT
 	EX	lw t1, SC32_FPC_CSR(a0)
 
+#ifndef CONFIG_CPU_MIPS64_R6
 	mfc0	t0, CP0_STATUS
 	sll	t0, t0, 5
 	bgez	t0, 1f			# skip loading odd if FR=0
 	 nop
+#endif
 
 	EX      ldc1 $f1, SC32_FPREGS+8(a0)
 	EX      ldc1 $f3, SC32_FPREGS+24(a0)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 64591e6..3b1a36f 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -115,7 +115,8 @@
  * Save a thread's fp context.
  */
 LEAF(_save_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	mfc0	t0, CP0_STATUS
 #endif
 	fpu_save_double a0 t0 t1		# clobbers t1
@@ -126,7 +127,8 @@
  * Restore a thread's fp context.
  */
 LEAF(_restore_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	mfc0	t0, CP0_STATUS
 #endif
 	fpu_restore_double a0 t0 t1		# clobbers t1
@@ -240,9 +242,9 @@
 	mtc1	t1, $f30
 	mtc1	t1, $f31
 
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
 	.set    push
-	.set    mips32r2
+	.set    MIPS_ISA_LEVEL_RAW
 	.set	fp=64
 	sll     t0, t0, 5			# is Status.FR set?
 	bgez    t0, 1f				# no: skip setting upper 32b
@@ -280,9 +282,9 @@
 	mthc1   t1, $f30
 	mthc1   t1, $f31
 1:	.set    pop
-#endif /* CONFIG_CPU_MIPS32_R2 */
+#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
 #else
-	.set	arch=r4000
+	.set	MIPS_ISA_ARCH_LEVEL_RAW
 	dmtc1	t1, $f0
 	dmtc1	t1, $f2
 	dmtc1	t1, $f4
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 67f2495..d1168d7 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -208,6 +208,7 @@
 	case CPU_INTERAPTIV:
 	case CPU_PROAPTIV:
 	case CPU_P5600:
+	case CPU_QEMU_GENERIC:
 		config0 = read_c0_config();
 		/* FIXME: addresses are Malta specific */
 		if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 604b558..53a7ef9 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -136,7 +136,7 @@
 		: "memory");
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__ (
-		"	.set	arch=r4000				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"	li	%[err], 0				\n"
 		"1:	ll	%[old], (%[addr])			\n"
 		"	move	%[tmp], %[new]				\n"
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c3b41e2..33984c0 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -46,6 +46,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/idle.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/module.h>
@@ -837,7 +838,7 @@
 	exception_exit(prev_state);
 }
 
-static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 	const char *str)
 {
 	siginfo_t info;
@@ -1027,7 +1028,34 @@
 	unsigned int opcode = 0;
 	int status = -1;
 
+	/*
+	 * Avoid any kernel code. Just emulate the R2 instruction
+	 * as quickly as possible.
+	 */
+	if (mipsr2_emulation && cpu_has_mips_r6 &&
+	    likely(user_mode(regs))) {
+		if (likely(get_user(opcode, epc) >= 0)) {
+			status = mipsr2_decoder(regs, opcode);
+			switch (status) {
+			case 0:
+			case SIGEMT:
+				task_thread_info(current)->r2_emul_return = 1;
+				return;
+			case SIGILL:
+				goto no_r2_instr;
+			default:
+				process_fpemu_return(status,
+						     &current->thread.cp0_baduaddr);
+				task_thread_info(current)->r2_emul_return = 1;
+				return;
+			}
+		}
+	}
+
+no_r2_instr:
+
 	prev_state = exception_enter();
+
 	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
 		       SIGILL) == NOTIFY_STOP)
 		goto out;
@@ -1134,10 +1162,29 @@
 	return NOTIFY_OK;
 }
 
+static int wait_on_fp_mode_switch(atomic_t *p)
+{
+	/*
+	 * The FP mode for this task is currently being switched. That may
+	 * involve modifications to the format of this task's FP context which
+	 * make it unsafe to proceed with execution for the moment. Instead,
+	 * schedule some other task.
+	 */
+	schedule();
+	return 0;
+}
+
 static int enable_restore_fp_context(int msa)
 {
 	int err, was_fpu_owner, prior_msa;
 
+	/*
+	 * If an FP mode switch is currently underway, wait for it to
+	 * complete before proceeding.
+	 */
+	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
+			 wait_on_fp_mode_switch, TASK_KILLABLE);
+
 	if (!used_math()) {
 		/* First time FP context user. */
 		preempt_disable();
@@ -1541,6 +1588,7 @@
 	case CPU_INTERAPTIV:
 	case CPU_PROAPTIV:
 	case CPU_P5600:
+	case CPU_QEMU_GENERIC:
 		{
 #define ERRCTL_PE	0x80000000
 #define ERRCTL_L2P	0x00800000
@@ -1630,7 +1678,7 @@
 	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
 	       reg_val & (1<<30) ? "secondary" : "primary",
 	       reg_val & (1<<31) ? "data" : "insn");
-	if (cpu_has_mips_r2 &&
+	if ((cpu_has_mips_r2_r6) &&
 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
 		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
 			reg_val & (1<<29) ? "ED " : "",
@@ -1670,7 +1718,7 @@
 	unsigned int reg_val;
 
 	/* For the moment, report the problem and hang. */
-	if (cpu_has_mips_r2 &&
+	if ((cpu_has_mips_r2_r6) &&
 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
 		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
 		       read_c0_ecc());
@@ -1959,7 +2007,7 @@
 {
 	unsigned int hwrena = cpu_hwrena_impl_bits;
 
-	if (cpu_has_mips_r2)
+	if (cpu_has_mips_r2_r6)
 		hwrena |= 0x0000000f;
 
 	if (!noulri && cpu_has_userlocal)
@@ -2003,7 +2051,7 @@
 	 *  o read IntCtl.IPTI to determine the timer interrupt
 	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
 	 */
-	if (cpu_has_mips_r2) {
+	if (cpu_has_mips_r2_r6) {
 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2094,7 +2142,7 @@
 #else
         ebase = CKSEG0;
 #endif
-		if (cpu_has_mips_r2)
+		if (cpu_has_mips_r2_r6)
 			ebase += (read_c0_ebase() & 0x3ffff000);
 	}
 
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index e11906d..bbb6969 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -129,6 +129,7 @@
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadW(addr, value, res)   \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_lwl("%0", "(%2)")"\n"    \
@@ -146,6 +147,39 @@
 			".previous"                         \
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define     LoadW(addr, value, res) \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n"			    \
+			".set\tnoat\n\t"		    \
+			"1:"user_lb("%0", "0(%2)")"\n\t"    \
+			"2:"user_lbu("$1", "1(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:"user_lbu("$1", "2(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:"user_lbu("$1", "3(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #define     LoadHWU(addr, value, res) \
 		__asm__ __volatile__ (                      \
@@ -169,6 +203,7 @@
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadWU(addr, value, res)  \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_lwl("%0", "(%2)")"\n"    \
@@ -206,6 +241,87 @@
 			".previous"                         \
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define	    LoadWU(addr, value, res) \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:"user_lbu("%0", "0(%2)")"\n\t"   \
+			"2:"user_lbu("$1", "1(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:"user_lbu("$1", "2(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:"user_lbu("$1", "3(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:lb\t%0, 0(%2)\n\t"    	    \
+			"2:lbu\t $1, 1(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:lbu\t$1, 2(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:lbu\t$1, 3(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"5:lbu\t$1, 4(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"6:lbu\t$1, 5(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"7:lbu\t$1, 6(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"8:lbu\t$1, 7(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n\t"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			STR(PTR)"\t5b, 11b\n\t"		    \
+			STR(PTR)"\t6b, 11b\n\t"		    \
+			STR(PTR)"\t7b, 11b\n\t"		    \
+			STR(PTR)"\t8b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 
 #define     StoreHW(addr, value, res) \
 		__asm__ __volatile__ (                      \
@@ -228,6 +344,7 @@
 			: "=r" (res)                        \
 			: "r" (value), "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     StoreW(addr, value, res)  \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_swl("%1", "(%2)")"\n"    \
@@ -263,9 +380,82 @@
 			".previous"                         \
 		: "=r" (res)                                \
 		: "r" (value), "r" (addr), "i" (-EFAULT));
-#endif
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define     StoreW(addr, value, res)  \
+		__asm__ __volatile__ (                      \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:"user_sb("%1", "3(%2)")"\n\t"    \
+			"srl\t$1, %1, 0x8\n\t"		    \
+			"2:"user_sb("$1", "2(%2)")"\n\t"    \
+			"srl\t$1, $1,  0x8\n\t"		    \
+			"3:"user_sb("$1", "1(%2)")"\n\t"    \
+			"srl\t$1, $1, 0x8\n\t"		    \
+			"4:"user_sb("$1", "0(%2)")"\n\t"    \
+			".set\tpop\n\t"			    \
+			"li\t%0, 0\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%0, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+		: "=&r" (res)			    	    \
+		: "r" (value), "r" (addr), "i" (-EFAULT)    \
+		: "memory");
 
-#ifdef __LITTLE_ENDIAN
+#define     StoreDW(addr, value, res) \
+		__asm__ __volatile__ (                      \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:sb\t%1, 7(%2)\n\t"    	    \
+			"dsrl\t$1, %1, 0x8\n\t"		    \
+			"2:sb\t$1, 6(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"3:sb\t$1, 5(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"4:sb\t$1, 4(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"5:sb\t$1, 3(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"6:sb\t$1, 2(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"7:sb\t$1, 1(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"8:sb\t$1, 0(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			".set\tpop\n\t"			    \
+			"li\t%0, 0\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%0, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			STR(PTR)"\t5b, 11b\n\t"		    \
+			STR(PTR)"\t6b, 11b\n\t"		    \
+			STR(PTR)"\t7b, 11b\n\t"		    \
+			STR(PTR)"\t8b, 11b\n\t"		    \
+			".previous"			    \
+		: "=&r" (res)			    	    \
+		: "r" (value), "r" (addr), "i" (-EFAULT)    \
+		: "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#else /* __BIG_ENDIAN */
+
 #define     LoadHW(addr, value, res)  \
 		__asm__ __volatile__ (".set\tnoat\n"        \
 			"1:\t"user_lb("%0", "1(%2)")"\n"    \
@@ -286,6 +476,7 @@
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadW(addr, value, res)   \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_lwl("%0", "3(%2)")"\n"   \
@@ -303,6 +494,40 @@
 			".previous"                         \
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define     LoadW(addr, value, res) \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n"			    \
+			".set\tnoat\n\t"		    \
+			"1:"user_lb("%0", "3(%2)")"\n\t"    \
+			"2:"user_lbu("$1", "2(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:"user_lbu("$1", "1(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:"user_lbu("$1", "0(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 
 #define     LoadHWU(addr, value, res) \
 		__asm__ __volatile__ (                      \
@@ -326,6 +551,7 @@
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadWU(addr, value, res)  \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_lwl("%0", "3(%2)")"\n"   \
@@ -363,6 +589,86 @@
 			".previous"                         \
 			: "=&r" (value), "=r" (res)         \
 			: "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define	    LoadWU(addr, value, res) \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:"user_lbu("%0", "3(%2)")"\n\t"   \
+			"2:"user_lbu("$1", "2(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:"user_lbu("$1", "1(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:"user_lbu("$1", "0(%2)")"\n\t"   \
+			"sll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+		__asm__ __volatile__ (			    \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:lb\t%0, 7(%2)\n\t"    	    \
+			"2:lbu\t$1, 6(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"3:lbu\t$1, 5(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"4:lbu\t$1, 4(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"5:lbu\t$1, 3(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"6:lbu\t$1, 2(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"7:lbu\t$1, 1(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"8:lbu\t$1, 0(%2)\n\t"   	    \
+			"dsll\t%0, 0x8\n\t"		    \
+			"or\t%0, $1\n\t"		    \
+			"li\t%1, 0\n"			    \
+			".set\tpop\n\t"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%1, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			STR(PTR)"\t5b, 11b\n\t"		    \
+			STR(PTR)"\t6b, 11b\n\t"		    \
+			STR(PTR)"\t7b, 11b\n\t"		    \
+			STR(PTR)"\t8b, 11b\n\t"		    \
+			".previous"			    \
+			: "=&r" (value), "=r" (res)	    \
+			: "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #define     StoreHW(addr, value, res) \
 		__asm__ __volatile__ (                      \
@@ -384,7 +690,7 @@
 			".previous"                         \
 			: "=r" (res)                        \
 			: "r" (value), "r" (addr), "i" (-EFAULT));
-
+#ifndef CONFIG_CPU_MIPSR6
 #define     StoreW(addr, value, res)  \
 		__asm__ __volatile__ (                      \
 			"1:\t"user_swl("%1", "3(%2)")"\n"   \
@@ -420,6 +726,79 @@
 			".previous"                         \
 		: "=r" (res)                                \
 		: "r" (value), "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define     StoreW(addr, value, res)  \
+		__asm__ __volatile__ (                      \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:"user_sb("%1", "0(%2)")"\n\t"    \
+			"srl\t$1, %1, 0x8\n\t"		    \
+			"2:"user_sb("$1", "1(%2)")"\n\t"    \
+			"srl\t$1, $1,  0x8\n\t"		    \
+			"3:"user_sb("$1", "2(%2)")"\n\t"    \
+			"srl\t$1, $1, 0x8\n\t"		    \
+			"4:"user_sb("$1", "3(%2)")"\n\t"    \
+			".set\tpop\n\t"			    \
+			"li\t%0, 0\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%0, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			".previous"			    \
+		: "=&r" (res)			    	    \
+		: "r" (value), "r" (addr), "i" (-EFAULT)    \
+		: "memory");
+
+#define     StoreDW(addr, value, res) \
+		__asm__ __volatile__ (                      \
+			".set\tpush\n\t"		    \
+			".set\tnoat\n\t"		    \
+			"1:sb\t%1, 0(%2)\n\t"    	    \
+			"dsrl\t$1, %1, 0x8\n\t"		    \
+			"2:sb\t$1, 1(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"3:sb\t$1, 2(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"4:sb\t$1, 3(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"5:sb\t$1, 4(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"6:sb\t$1, 5(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"7:sb\t$1, 6(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			"8:sb\t$1, 7(%2)\n\t"    	    \
+			"dsrl\t$1, $1, 0x8\n\t"		    \
+			".set\tpop\n\t"			    \
+			"li\t%0, 0\n"			    \
+			"10:\n\t"			    \
+			".insn\n\t"			    \
+			".section\t.fixup,\"ax\"\n\t"	    \
+			"11:\tli\t%0, %3\n\t"		    \
+			"j\t10b\n\t"			    \
+			".previous\n\t"			    \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 11b\n\t"		    \
+			STR(PTR)"\t2b, 11b\n\t"		    \
+			STR(PTR)"\t3b, 11b\n\t"		    \
+			STR(PTR)"\t4b, 11b\n\t"		    \
+			STR(PTR)"\t5b, 11b\n\t"		    \
+			STR(PTR)"\t6b, 11b\n\t"		    \
+			STR(PTR)"\t7b, 11b\n\t"		    \
+			STR(PTR)"\t8b, 11b\n\t"		    \
+			".previous"			    \
+		: "=&r" (res)			    	    \
+		: "r" (value), "r" (addr), "i" (-EFAULT)    \
+		: "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
 #endif
 
 static void emulate_load_store_insn(struct pt_regs *regs,
@@ -703,10 +1082,13 @@
 			break;
 		return;
 
+#ifndef CONFIG_CPU_MIPSR6
 	/*
 	 * COP2 is available to implementor for application specific use.
 	 * It's up to applications to register a notifier chain and do
 	 * whatever they have to do, including possible sending of signals.
+	 *
+	 * This instruction has been reallocated in Release 6
 	 */
 	case lwc2_op:
 		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
@@ -723,7 +1105,7 @@
 	case sdc2_op:
 		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
 		break;
-
+#endif
 	default:
 		/*
 		 * Pheeee...  We encountered an yet unknown instruction or
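
MIPS R6 removes lwl/lwr, ldl/ldr, swl/swr and sdl/sdr, so the macros added
above assemble and scatter unaligned words one byte at a time instead. A
hedged C model of what the big-endian LoadW replacement computes
(illustration only, not the kernel macro itself):

	#include <stdint.h>

	/*
	 * One sign-extending lb for the most significant byte, then
	 * three lbu + sll/or steps; the result matches what lw leaves
	 * in a 64-bit register.
	 */
	static int64_t load_w_be(const uint8_t *addr)
	{
		int64_t value;

		value = (int8_t)addr[0];		/* lb: sign-extend MSB */
		value = (value << 8) | addr[1];		/* lbu + sll/or */
		value = (value << 8) | addr[2];
		value = (value << 8) | addr[3];
		return value;
	}
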
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index bbcd822..b6beb0e 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -216,6 +216,7 @@
 	if (idx > current_cpu_data.tlbsize) {
 		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
 		kvm_mips_dump_host_tlbs();
+		local_irq_restore(flags);
 		return -1;
 	}
 
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index c1388d4..bd6437f 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -24,18 +24,18 @@
 	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
 	    TP_ARGS(vcpu, reason),
 	    TP_STRUCT__entry(
-			__field(struct kvm_vcpu *, vcpu)
+			__field(unsigned long, pc)
 			__field(unsigned int, reason)
 	    ),
 
 	    TP_fast_assign(
-			__entry->vcpu = vcpu;
+			__entry->pc = vcpu->arch.pc;
 			__entry->reason = reason;
 	    ),
 
 	    TP_printk("[%s]PC: 0x%08lx",
 		      kvm_mips_exit_types_str[__entry->reason],
-		      __entry->vcpu->arch.pc)
+		      __entry->pc)
 );
 
 #endif /* _TRACE_KVM_H */
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index eeddc58..1e9e900 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -8,6 +8,7 @@
 
 obj-y			+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o
+lib-$(CONFIG_GENERIC_CSUM)	:= $(filter-out csum_partial.o, $(lib-y))
 
 obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
 obj-$(CONFIG_CPU_R3000)		+= r3k_dump_tlb.o
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 5d3238a..9245e17 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -293,9 +293,14 @@
 	 and	t0, src, ADDRMASK
 	PREFS(	0, 2*32(src) )
 	PREFD(	1, 2*32(dst) )
+#ifndef CONFIG_CPU_MIPSR6
 	bnez	t1, .Ldst_unaligned\@
 	 nop
 	bnez	t0, .Lsrc_unaligned_dst_aligned\@
+#else
+	or	t0, t0, t1
+	bnez	t0, .Lcopy_unaligned_bytes\@
+#endif
 	/*
 	 * use delay slot for fall-through
 	 * src and dst are aligned; need to compute rem
@@ -376,6 +381,7 @@
 	bne	rem, len, 1b
 	.set	noreorder
 
+#ifndef CONFIG_CPU_MIPSR6
 	/*
 	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
 	 * A loop would do only a byte at a time with possible branch
@@ -477,6 +483,7 @@
 	bne	len, rem, 1b
 	.set	noreorder
 
+#endif /* !CONFIG_CPU_MIPSR6 */
 .Lcopy_bytes_checklen\@:
 	beqz	len, .Ldone\@
 	 nop
@@ -504,6 +511,22 @@
 .Ldone\@:
 	jr	ra
 	 nop
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lcopy_unaligned_bytes\@:
+1:
+	COPY_BYTE(0)
+	COPY_BYTE(1)
+	COPY_BYTE(2)
+	COPY_BYTE(3)
+	COPY_BYTE(4)
+	COPY_BYTE(5)
+	COPY_BYTE(6)
+	COPY_BYTE(7)
+	ADD	src, src, 8
+	b	1b
+	 ADD	dst, dst, 8
+#endif /* CONFIG_CPU_MIPSR6 */
 	.if __memcpy == 1
 	END(memcpy)
 	.set __memcpy, 0
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index c8fe6b1..b8e63fd 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -111,6 +111,7 @@
 	.set		at
 #endif
 
+#ifndef CONFIG_CPU_MIPSR6
 	R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
 	EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@)	/* make word/dword aligned */
@@ -120,6 +121,30 @@
 	PTR_SUBU	a0, t0			/* long align ptr */
 	PTR_ADDU	a2, t0			/* correct size */
 
+#else /* CONFIG_CPU_MIPSR6 */
+#define STORE_BYTE(N)				\
+	EX(sb, a1, N(a0), .Lbyte_fixup\@);	\
+	beqz		t0, 0f;			\
+	PTR_ADDU	t0, 1;
+
+	PTR_ADDU	a2, t0			/* correct size */
+	PTR_ADDU	t0, 1
+	STORE_BYTE(0)
+	STORE_BYTE(1)
+#if LONGSIZE == 4
+	EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+	STORE_BYTE(2)
+	STORE_BYTE(3)
+	STORE_BYTE(4)
+	STORE_BYTE(5)
+	EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+	ori		a0, STORMASK
+	xori		a0, STORMASK
+	PTR_ADDIU	a0, STORSIZE
+#endif /* CONFIG_CPU_MIPSR6 */
 1:	ori		t1, a2, 0x3f		/* # of full blocks */
 	xori		t1, 0x3f
 	beqz		t1, .Lmemset_partial\@	/* no block to fill */
@@ -159,6 +184,7 @@
 	andi		a2, STORMASK		/* At most one long to go */
 
 	beqz		a2, 1f
+#ifndef CONFIG_CPU_MIPSR6
 	PTR_ADDU	a0, a2			/* What's left */
 	R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
@@ -166,6 +192,22 @@
 #else
 	EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
 #endif
+#else
+	PTR_SUBU	t0, $0, a2
+	PTR_ADDIU	t0, 1
+	STORE_BYTE(0)
+	STORE_BYTE(1)
+#if LONGSIZE == 4
+	EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+	STORE_BYTE(2)
+	STORE_BYTE(3)
+	STORE_BYTE(4)
+	STORE_BYTE(5)
+	EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+#endif
 1:	jr		ra
 	move		a2, zero
 
@@ -186,6 +228,11 @@
 	.hidden __memset
 	.endif
 
+.Lbyte_fixup\@:
+	PTR_SUBU	a2, $0, t0
+	jr		ra
+	 PTR_ADDIU	a2, 1
+
 .Lfirst_fixup\@:
 	jr	ra
 	nop
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index be777d9..272af8a 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -15,7 +15,7 @@
 #include <linux/export.h>
 #include <linux/stringify.h>
 
-#ifndef CONFIG_CPU_MIPSR2
+#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
 
 /*
  * For cli() we have to insert nops to make sure that the new value
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 9dfcd7f..b30bf65 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -48,6 +48,7 @@
 #include <asm/processor.h>
 #include <asm/fpu_emulator.h>
 #include <asm/fpu.h>
+#include <asm/mips-r2-to-r6-emul.h>
 
 #include "ieee754.h"
 
@@ -68,7 +69,7 @@
 #define modeindex(v) ((v) & FPU_CSR_RM)
 
 /* convert condition code register number to csr bit */
-static const unsigned int fpucondbit[8] = {
+const unsigned int fpucondbit[8] = {
 	FPU_CSR_COND0,
 	FPU_CSR_COND1,
 	FPU_CSR_COND2,
@@ -448,6 +449,9 @@
 				dec_insn.next_pc_inc;
 			/* Fall through */
 		case jr_op:
+			/* For R6, JR already emulated in jalr_op */
+			if (NO_R6EMU && insn.r_format.opcode == jr_op)
+				break;
 			*contpc = regs->regs[insn.r_format.rs];
 			return 1;
 		}
@@ -456,12 +460,18 @@
 		switch (insn.i_format.rt) {
 		case bltzal_op:
 		case bltzall_op:
+			if (NO_R6EMU && (insn.i_format.rs ||
+			    insn.i_format.rt == bltzall_op))
+				break;
+
 			regs->regs[31] = regs->cp0_epc +
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 			/* Fall through */
-		case bltz_op:
 		case bltzl_op:
+			if (NO_R6EMU)
+				break;
+		case bltz_op:
 			if ((long)regs->regs[insn.i_format.rs] < 0)
 				*contpc = regs->cp0_epc +
 					dec_insn.pc_inc +
@@ -473,12 +483,18 @@
 			return 1;
 		case bgezal_op:
 		case bgezall_op:
+			if (NO_R6EMU && (insn.i_format.rs ||
+			    insn.i_format.rt == bgezall_op))
+				break;
+
 			regs->regs[31] = regs->cp0_epc +
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 			/* Fall through */
-		case bgez_op:
 		case bgezl_op:
+			if (NO_R6EMU)
+				break;
+		case bgez_op:
 			if ((long)regs->regs[insn.i_format.rs] >= 0)
 				*contpc = regs->cp0_epc +
 					dec_insn.pc_inc +
@@ -505,8 +521,10 @@
 		/* Set microMIPS mode bit: XOR for jalx. */
 		*contpc ^= bit;
 		return 1;
-	case beq_op:
 	case beql_op:
+		if (NO_R6EMU)
+			break;
+	case beq_op:
 		if (regs->regs[insn.i_format.rs] ==
 		    regs->regs[insn.i_format.rt])
 			*contpc = regs->cp0_epc +
@@ -517,8 +535,10 @@
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 		return 1;
-	case bne_op:
 	case bnel_op:
+		if (NO_R6EMU)
+			break;
+	case bne_op:
 		if (regs->regs[insn.i_format.rs] !=
 		    regs->regs[insn.i_format.rt])
 			*contpc = regs->cp0_epc +
@@ -529,8 +549,34 @@
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 		return 1;
-	case blez_op:
 	case blezl_op:
+		if (NO_R6EMU)
+			break;
+	case blez_op:
+
+		/*
+		 * Compact branches for R6 for the
+		 * blez and blezl opcodes.
+		 * BLEZ  | rs = 0 | rt != 0  == BLEZALC
+		 * BLEZ  | rs = rt != 0      == BGEZALC
+		 * BLEZ  | rs != 0 | rt != 0 == BGEUC
+		 * BLEZL | rs = 0 | rt != 0  == BLEZC
+		 * BLEZL | rs = rt != 0      == BGEZC
+		 * BLEZL | rs != 0 | rt != 0 == BGEC
+		 *
+		 * For real BLEZ{,L}, rt is always 0.
+		 */
+		if (cpu_has_mips_r6 && insn.i_format.rt) {
+			if ((insn.i_format.opcode == blez_op) &&
+			    ((!insn.i_format.rs && insn.i_format.rt) ||
+			     (insn.i_format.rs == insn.i_format.rt)))
+				regs->regs[31] = regs->cp0_epc +
+					dec_insn.pc_inc;
+			*contpc = regs->cp0_epc + dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+
+			return 1;
+		}
 		if ((long)regs->regs[insn.i_format.rs] <= 0)
 			*contpc = regs->cp0_epc +
 				dec_insn.pc_inc +
@@ -540,8 +586,35 @@
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 		return 1;
-	case bgtz_op:
 	case bgtzl_op:
+		if (NO_R6EMU)
+			break;
+	case bgtz_op:
+		/*
+		 * Compact branches for R6 for the
+		 * bgtz and bgtzl opcodes.
+		 * BGTZ  | rs = 0 | rt != 0  == BGTZALC
+		 * BGTZ  | rs = rt != 0      == BLTZALC
+		 * BGTZ  | rs != 0 | rt != 0 == BLTUC
+		 * BGTZL | rs = 0 | rt != 0  == BGTZC
+		 * BGTZL | rs = rt != 0      == BLTZC
+		 * BGTZL | rs != 0 | rt != 0 == BLTC
+		 *
+		 * *ZALC variant for BGTZ && rt != 0
+		 * For real GTZ{,L}, rt is always 0.
+		 */
+		if (cpu_has_mips_r6 && insn.i_format.rt) {
+			if ((insn.i_format.opcode == blez_op) &&
+			    ((!insn.i_format.rs && insn.i_format.rt) ||
+			     (insn.i_format.rs == insn.i_format.rt)))
+				regs->regs[31] = regs->cp0_epc +
+					dec_insn.pc_inc;
+			*contpc = regs->cp0_epc + dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+
+			return 1;
+		}
+
 		if ((long)regs->regs[insn.i_format.rs] > 0)
 			*contpc = regs->cp0_epc +
 				dec_insn.pc_inc +
@@ -551,6 +624,16 @@
 				dec_insn.pc_inc +
 				dec_insn.next_pc_inc;
 		return 1;
+	case cbcond0_op:
+	case cbcond1_op:
+		if (!cpu_has_mips_r6)
+			break;
+		if (insn.i_format.rt && !insn.i_format.rs)
+			regs->regs[31] = regs->cp0_epc + 4;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+
+		return 1;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	case lwc2_op: /* This is bbit0 on Octeon */
 		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
@@ -576,9 +659,73 @@
 		else
 			*contpc = regs->cp0_epc + 8;
 		return 1;
+#else
+	case bc6_op:
+		/*
+		 * Only valid for MIPS R6 but we can still end up
+		 * here from a broken userland so just tell emulator
+		 * this is not a branch and let it break later on.
+		 */
+		if  (!cpu_has_mips_r6)
+			break;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+
+		return 1;
+	case balc6_op:
+		if (!cpu_has_mips_r6)
+			break;
+		regs->regs[31] = regs->cp0_epc + 4;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+
+		return 1;
+	case beqzcjic_op:
+		if (!cpu_has_mips_r6)
+			break;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+
+		return 1;
+	case bnezcjialc_op:
+		if (!cpu_has_mips_r6)
+			break;
+		if (!insn.i_format.rs)
+			regs->regs[31] = regs->cp0_epc + 4;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+
+		return 1;
 #endif
 	case cop0_op:
 	case cop1_op:
+		/* Need to check for R6 bc1nez and bc1eqz branches */
+		if (cpu_has_mips_r6 &&
+		    ((insn.i_format.rs == bc1eqz_op) ||
+		     (insn.i_format.rs == bc1nez_op))) {
+			bit = 0;
+			switch (insn.i_format.rs) {
+			case bc1eqz_op:
+				if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)
+				    bit = 1;
+				break;
+			case bc1nez_op:
+				if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1))
+				    bit = 1;
+				break;
+			}
+			if (bit)
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					(insn.i_format.simmediate << 2);
+			else
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
+
+			return 1;
+		}
+		/* R2/R6 compatible cop1 instruction. Fall through */
 	case cop2_op:
 	case cop1x_op:
 		if (insn.i_format.rs == bc_op) {
@@ -1414,14 +1561,14 @@
 		 * achieve full IEEE-754 accuracy - however this emulator does.
 		 */
 		case frsqrt_op:
-			if (!cpu_has_mips_4_5_r2)
+			if (!cpu_has_mips_4_5_r2_r6)
 				return SIGILL;
 
 			handler.u = fpemu_sp_rsqrt;
 			goto scopuop;
 
 		case frecip_op:
-			if (!cpu_has_mips_4_5_r2)
+			if (!cpu_has_mips_4_5_r2_r6)
 				return SIGILL;
 
 			handler.u = fpemu_sp_recip;
@@ -1616,13 +1763,13 @@
 		 * achieve full IEEE-754 accuracy - however this emulator does.
 		 */
 		case frsqrt_op:
-			if (!cpu_has_mips_4_5_r2)
+			if (!cpu_has_mips_4_5_r2_r6)
 				return SIGILL;
 
 			handler.u = fpemu_dp_rsqrt;
 			goto dcopuop;
 		case frecip_op:
-			if (!cpu_has_mips_4_5_r2)
+			if (!cpu_has_mips_4_5_r2_r6)
 				return SIGILL;
 
 			handler.u = fpemu_dp_recip;
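
The comment tables above fold several R6 compact branches into the legacy
BLEZ/BLEZL (and BGTZ/BGTZL) opcodes. A hedged restatement in plain C of the
test the BLEZ hunk applies, with an illustrative struct standing in for the
kernel's i_format layout:

	struct i_fmt {
		unsigned int opcode, rs, rt;	/* stand-in for i_format */
	};

	/* On an R6 CPU, rt != 0 turns the encoding into a compact branch. */
	static int r6_is_compact_branch(const struct i_fmt *i, int cpu_has_r6)
	{
		return cpu_has_r6 && i->rt != 0;
	}

	/*
	 * Given rt != 0, only the BLEZ (non-likely) opcode with rs == 0 or
	 * rs == rt is a linking form (BLEZALC/BGEZALC) and writes $31.
	 */
	static int r6_compact_branch_links(const struct i_fmt *i,
					   unsigned int blez_opcode)
	{
		return i->opcode == blez_opcode &&
		       (i->rs == 0 || i->rs == i->rt);
	}
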
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index dd261df..3f80596 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -794,7 +794,7 @@
 		__asm__ __volatile__ (
 			".set push\n\t"
 			".set noat\n\t"
-			".set mips3\n\t"
+			".set "MIPS_ISA_LEVEL"\n\t"
 #ifdef CONFIG_32BIT
 			"la	$at,1f\n\t"
 #endif
@@ -1255,6 +1255,7 @@
 	case CPU_P5600:
 	case CPU_PROAPTIV:
 	case CPU_M5150:
+	case CPU_QEMU_GENERIC:
 		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
 		    (c->icache.waysize > PAGE_SIZE))
 			c->icache.flags |= MIPS_CACHE_ALIASES;
@@ -1472,7 +1473,8 @@
 
 	default:
 		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
+				    MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
 #ifdef CONFIG_MIPS_CPU_SCACHE
 			if (mips_sc_init ()) {
 				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 70ab5d6..7ff8637 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/ratelimit.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -28,6 +29,8 @@
 #include <asm/highmem.h>		/* For VMALLOC_END */
 #include <linux/kdebug.h>
 
+int show_unhandled_signals = 1;
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -44,6 +47,8 @@
 	int fault;
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
+	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
 #if 0
 	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
 	       current->comm, current->pid, field, address, write,
@@ -203,15 +208,21 @@
 	if (user_mode(regs)) {
 		tsk->thread.cp0_badvaddr = address;
 		tsk->thread.error_code = write;
-#if 0
-		printk("do_page_fault() #2: sending SIGSEGV to %s for "
-		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
-		       tsk->comm,
-		       write ? "write access to" : "read access from",
-		       field, address,
-		       field, (unsigned long) regs->cp0_epc,
-		       field, (unsigned long) regs->regs[31]);
-#endif
+		if (show_unhandled_signals &&
+		    unhandled_signal(tsk, SIGSEGV) &&
+		    __ratelimit(&ratelimit_state)) {
+			pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx",
+				tsk->comm,
+				write ? "write access to" : "read access from",
+				field, address);
+			pr_info("epc = %0*lx in", field,
+				(unsigned long) regs->cp0_epc);
+			print_vma_addr(" ", regs->cp0_epc);
+			pr_info("ra  = %0*lx in", field,
+				(unsigned long) regs->regs[31]);
+			print_vma_addr(" ", regs->regs[31]);
+			pr_info("\n");
+		}
 		info.si_signo = SIGSEGV;
 		info.si_errno = 0;
 		/* info.si_code has been set above */
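
The old #if 0 diagnostic is replaced by a rate-limited report of unhandled
SIGSEGVs. A hedged sketch of the same __ratelimit() idiom in isolation, with
an illustrative helper and message (the limits mirror the hunk above: at most
10 messages per 5*HZ):

	#include <linux/ratelimit.h>
	#include <linux/printk.h>

	static void report_bad_access(unsigned long address)
	{
		static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);

		/* __ratelimit() returns nonzero while the budget lasts. */
		if (__ratelimit(&rs))
			pr_info("bad access at %08lx\n", address);
	}
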
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index b611102..3f85f92 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -72,6 +72,20 @@
 #define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
+/*
+ * MIPS R6 only allows a 9-bit signed offset in the pref instruction.
+ * Skip the prefetch if the offset does not fit.
+ */
+#define _uasm_i_pref(a, b, c, d)		\
+do {						\
+	if (cpu_has_mips_r6) {			\
+		if (c <= 0xff && c >= -0x100)	\
+			uasm_i_pref(a, b, c, d);\
+	} else {				\
+		uasm_i_pref(a, b, c, d);	\
+	}					\
+} while (0)
+
 static int pref_bias_clear_store;
 static int pref_bias_copy_load;
 static int pref_bias_copy_store;
@@ -178,7 +192,15 @@
 			pref_bias_copy_load = 256;
 			pref_bias_copy_store = 128;
 			pref_src_mode = Pref_LoadStreamed;
-			pref_dst_mode = Pref_PrepareForStore;
+			if (cpu_has_mips_r6)
+				/*
+				 * Bit 30 (Pref_PrepareForStore) has been
+				 * removed from MIPS R6. Use bit 5
+				 * (Pref_StoreStreamed).
+				 */
+				pref_dst_mode = Pref_StoreStreamed;
+			else
+				pref_dst_mode = Pref_PrepareForStore;
 			break;
 		}
 	} else {
@@ -214,7 +236,7 @@
 		return;
 
 	if (pref_bias_clear_store) {
-		uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
+		_uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
 			    A0);
 	} else if (cache_line_size == (half_clear_loop_size << 1)) {
 		if (cpu_has_cache_cdex_s) {
@@ -357,7 +379,7 @@
 		return;
 
 	if (pref_bias_copy_load)
-		uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
+		_uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
 }
 
 static inline void build_copy_store_pref(u32 **buf, int off)
@@ -366,7 +388,7 @@
 		return;
 
 	if (pref_bias_copy_store) {
-		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
+		_uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
 			    A0);
 	} else if (cache_line_size == (half_copy_loop_size << 1)) {
 		if (cpu_has_cache_cdex_s) {
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 99eb8fa..4ceafd1 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -81,6 +81,7 @@
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 	case CPU_BMIPS5000:
+	case CPU_QEMU_GENERIC:
 		if (config2 & (1 << 12))
 			return 0;
 	}
@@ -104,7 +105,8 @@
 
 	/* Ignore anything but MIPSxx processors */
 	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-			      MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
+			      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
+			      MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
 		return 0;
 
 	/* Does this MIPS32/MIPS64 CPU have a config2 register? */
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 30639a6..b2afa49 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -485,13 +485,11 @@
 		 * Enable the no read, no exec bits, and enable large virtual
 		 * address.
 		 */
-		u32 pg = PG_RIE | PG_XIE;
 #ifdef CONFIG_64BIT
-		pg |= PG_ELPA;
+		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
+#else
+		set_c0_pagegrain(PG_RIE | PG_XIE);
 #endif
-		if (cpu_has_rixiex)
-			pg |= PG_IEC;
-		write_c0_pagegrain(pg);
 	}
 
 	temp_tlb_entry = current_cpu_data.tlbsize - 1;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 3978a3d..d75ff73 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -501,7 +501,7 @@
 	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
 	}
 
-	if (cpu_has_mips_r2) {
+	if (cpu_has_mips_r2_exec_hazard) {
 		/*
 		 * The architecture spec says an ehb is required here,
 		 * but a number of cores do not have the hazard and
@@ -514,6 +514,7 @@
 		case CPU_PROAPTIV:
 		case CPU_P5600:
 		case CPU_M5150:
+		case CPU_QEMU_GENERIC:
 			break;
 
 		default:
@@ -1952,7 +1953,7 @@
 
 		switch (current_cpu_type()) {
 		default:
-			if (cpu_has_mips_r2) {
+			if (cpu_has_mips_r2_exec_hazard) {
 				uasm_i_ehb(&p);
 
 		case CPU_CAVIUM_OCTEON:
@@ -2019,7 +2020,7 @@
 
 		switch (current_cpu_type()) {
 		default:
-			if (cpu_has_mips_r2) {
+			if (cpu_has_mips_r2_exec_hazard) {
 				uasm_i_ehb(&p);
 
 		case CPU_CAVIUM_OCTEON:
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 8399ddf..d78178d 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -38,14 +38,6 @@
 	 | (e) << RE_SH						\
 	 | (f) << FUNC_SH)
 
-/* Define these when we are not the ISA the kernel is being compiled with. */
-#ifndef CONFIG_CPU_MICROMIPS
-#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
-#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
-#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
-#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
-#endif
-
 #include "uasm.c"
 
 static struct insn insn_table_MM[] = {
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 8e02291..b4a83789 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -38,13 +38,13 @@
 	 | (e) << RE_SH						\
 	 | (f) << FUNC_SH)
 
-/* Define these when we are not the ISA the kernel is being compiled with. */
-#ifdef CONFIG_CPU_MICROMIPS
-#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
-#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
-#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
-#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
-#endif
+/* This macro sets the non-variable bits of an R6 instruction. */
+#define M6(a, b, c, d, e)					\
+	((a) << OP_SH						\
+	 | (b) << RS_SH						\
+	 | (c) << RT_SH						\
+	 | (d) << SIMM9_SH					\
+	 | (e) << FUNC_SH)
 
 #include "uasm.c"
 
@@ -62,7 +62,11 @@
 	{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
 	{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
 	{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+#ifndef CONFIG_CPU_MIPSR6
 	{ insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+	{ insn_cache,  M6(cache_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
+#endif
 	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
 	{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
@@ -85,13 +89,22 @@
 	{ insn_jal,  M(jal_op, 0, 0, 0, 0, 0),	JIMM },
 	{ insn_jalr,  M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
 	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+#ifndef CONFIG_CPU_MIPSR6
 	{ insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+#else
+	{ insn_jr,  M(spec_op, 0, 0, 0, 0, jalr_op),  RS },
+#endif
 	{ insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
 	{ insn_lh,  M(lh_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#ifndef CONFIG_CPU_MIPSR6
 	{ insn_lld,  M(lld_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },
 	{ insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+	{ insn_lld,  M6(spec3_op, 0, 0, 0, lld6_op),  RS | RT | SIMM9 },
+	{ insn_ll,  M6(spec3_op, 0, 0, 0, ll6_op),  RS | RT | SIMM9 },
+#endif
 	{ insn_lui,  M(lui_op, 0, 0, 0, 0, 0),	RT | SIMM },
 	{ insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
@@ -104,11 +117,20 @@
 	{ insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
 	{ insn_ori,  M(ori_op, 0, 0, 0, 0, 0),	RS | RT | UIMM },
 	{ insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
+#ifndef CONFIG_CPU_MIPSR6
 	{ insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+	{ insn_pref,  M6(spec3_op, 0, 0, 0, pref6_op),  RS | RT | SIMM9 },
+#endif
 	{ insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
 	{ insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
+#ifndef CONFIG_CPU_MIPSR6
 	{ insn_scd,  M(scd_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },
 	{ insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+	{ insn_scd,  M6(spec3_op, 0, 0, 0, scd6_op),  RS | RT | SIMM9 },
+	{ insn_sc,  M6(spec3_op, 0, 0, 0, sc6_op),  RS | RT | SIMM9 },
+#endif
 	{ insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
 	{ insn_sllv,  M(spec_op, 0, 0, 0, 0, sllv_op),  RS | RT | RD },
@@ -198,6 +220,8 @@
 		op |= build_set(va_arg(ap, u32));
 	if (ip->fields & SCIMM)
 		op |= build_scimm(va_arg(ap, u32));
+	if (ip->fields & SIMM9)
+		op |= build_scimm9(va_arg(ap, u32));
 	va_end(ap);
 
 	**buf = op;
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 4adf302..319051c 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -24,7 +24,8 @@
 	JIMM = 0x080,
 	FUNC = 0x100,
 	SET = 0x200,
-	SCIMM = 0x400
+	SCIMM = 0x400,
+	SIMM9 = 0x800,
 };
 
 #define OP_MASK		0x3f
@@ -41,6 +42,8 @@
 #define FUNC_SH		0
 #define SET_MASK	0x7
 #define SET_SH		0
+#define SIMM9_SH	7
+#define SIMM9_MASK	0x1ff
 
 enum opcode {
 	insn_invalid,
@@ -116,6 +119,14 @@
 	return (arg & SCIMM_MASK) << SCIMM_SH;
 }
 
+static inline u32 build_scimm9(s32 arg)
+{
+	WARN((arg > 0xff || arg < -0x100),
+	       KERN_WARNING "Micro-assembler field overflow\n");
+
+	return (arg & SIMM9_MASK) << SIMM9_SH;
+}
+
 static inline u32 build_func(u32 arg)
 {
 	WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -330,7 +341,7 @@
 void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
 			    unsigned int c)
 {
-	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
+	if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5)
 		/*
 		 * As per erratum Core-14449, replace prefetches 0-4,
 		 * 6-24 with 'pref 28'.
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index ec1dd24..e1d6989 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -72,7 +72,7 @@
 int get_c0_perfcount_int(void)
 {
 	if (gic_present)
-		return gic_get_c0_compare_int();
+		return gic_get_c0_perfcount_int();
 	if (cp0_perfcount_irq >= 0)
 		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
 	return -1;
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index f2355e3..f97e169 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -173,8 +173,8 @@
 }
 
 struct pci_ops bcm1480_pci_ops = {
-	.read = bcm1480_pcibios_read,
-	.write = bcm1480_pcibios_write,
+	.read	= bcm1480_pcibios_read,
+	.write	= bcm1480_pcibios_write,
 };
 
 static struct resource bcm1480_mem_resource = {
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index bedb72b..a04af55 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -327,8 +327,8 @@
 
 
 static struct pci_ops octeon_pci_ops = {
-	.read = octeon_read_config,
-	.write = octeon_write_config,
+	.read	= octeon_read_config,
+	.write	= octeon_write_config,
 };
 
 static struct resource octeon_pci_mem_resource = {
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index eb4a17b..1bb0b2b 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -1792,8 +1792,8 @@
 }
 
 static struct pci_ops octeon_pcie0_ops = {
-	.read = octeon_pcie0_read_config,
-	.write = octeon_pcie0_write_config,
+	.read	= octeon_pcie0_read_config,
+	.write	= octeon_pcie0_write_config,
 };
 
 static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@
 };
 
 static struct pci_ops octeon_pcie1_ops = {
-	.read = octeon_pcie1_read_config,
-	.write = octeon_pcie1_write_config,
+	.read	= octeon_pcie1_read_config,
+	.write	= octeon_pcie1_write_config,
 };
 
 static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@
 };
 
 static struct pci_ops octeon_dummy_ops = {
-	.read = octeon_dummy_read_config,
-	.write = octeon_dummy_write_config,
+	.read	= octeon_dummy_read_config,
+	.write	= octeon_dummy_write_config,
 };
 
 static struct resource octeon_dummy_mem_resource = {
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
index 8f1b86d..cdf1876 100644
--- a/arch/mips/sgi-ip22/ip22-gio.c
+++ b/arch/mips/sgi-ip22/ip22-gio.c
@@ -152,28 +152,6 @@
 	return 0;
 }
 
-static int gio_device_suspend(struct device *dev, pm_message_t state)
-{
-	struct gio_device *gio_dev = to_gio_device(dev);
-	struct gio_driver *drv = to_gio_driver(dev->driver);
-	int error = 0;
-
-	if (dev->driver && drv->suspend)
-		error = drv->suspend(gio_dev, state);
-	return error;
-}
-
-static int gio_device_resume(struct device *dev)
-{
-	struct gio_device *gio_dev = to_gio_device(dev);
-	struct gio_driver *drv = to_gio_driver(dev->driver);
-	int error = 0;
-
-	if (dev->driver && drv->resume)
-		error = drv->resume(gio_dev);
-	return error;
-}
-
 static void gio_device_shutdown(struct device *dev)
 {
 	struct gio_device *gio_dev = to_gio_device(dev);
@@ -400,8 +378,6 @@
 	.match	   = gio_bus_match,
 	.probe	   = gio_device_probe,
 	.remove	   = gio_device_remove,
-	.suspend   = gio_device_suspend,
-	.resume	   = gio_device_resume,
 	.shutdown  = gio_device_shutdown,
 	.uevent	   = gio_device_uevent,
 };
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c
index ac37e54..e44a15d 100644
--- a/arch/mips/sgi-ip27/ip27-reset.c
+++ b/arch/mips/sgi-ip27/ip27-reset.c
@@ -8,6 +8,7 @@
  * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
@@ -25,9 +26,9 @@
 #include <asm/sn/gda.h>
 #include <asm/sn/sn0/hub.h>
 
-void machine_restart(char *command) __attribute__((noreturn));
-void machine_halt(void) __attribute__((noreturn));
-void machine_power_off(void) __attribute__((noreturn));
+void machine_restart(char *command) __noreturn;
+void machine_halt(void) __noreturn;
+void machine_power_off(void) __noreturn;
 
 #define noreturn while(1);				/* Silence gcc.	 */
 
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 1f823da..44b3470 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -8,6 +8,7 @@
  * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org>
  */
 
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -35,9 +36,9 @@
 static struct timer_list power_timer, blink_timer, debounce_timer;
 static int has_panicked, shuting_down;
 
-static void ip32_machine_restart(char *command) __attribute__((noreturn));
-static void ip32_machine_halt(void) __attribute__((noreturn));
-static void ip32_machine_power_off(void) __attribute__((noreturn));
+static void ip32_machine_restart(char *command) __noreturn;
+static void ip32_machine_halt(void) __noreturn;
+static void ip32_machine_power_off(void) __noreturn;
 
 static void ip32_machine_restart(char *cmd)
 {
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index afab728..96d3f9d 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -56,7 +56,9 @@
 #define PGDIR_SHIFT	22
 #define PTRS_PER_PGD	1024
 #define PTRS_PER_PUD	1	/* we don't really have any PUD physically */
+#define __PAGETABLE_PUD_FOLDED
 #define PTRS_PER_PMD	1	/* we don't really have any PMD physically */
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PTE	1024
 
 #define PGD_SIZE	PAGE_SIZE
diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h
index 20fb1cf..6424621 100644
--- a/arch/nios2/include/asm/ptrace.h
+++ b/arch/nios2/include/asm/ptrace.h
@@ -15,7 +15,54 @@
 
 #include <uapi/asm/ptrace.h>
 
+/* This struct defines the way the registers are stored on the
+   stack during a system call.  */
+
 #ifndef __ASSEMBLY__
+struct pt_regs {
+	unsigned long  r8;	/* r8-r15 Caller-saved GP registers */
+	unsigned long  r9;
+	unsigned long  r10;
+	unsigned long  r11;
+	unsigned long  r12;
+	unsigned long  r13;
+	unsigned long  r14;
+	unsigned long  r15;
+	unsigned long  r1;	/* Assembler temporary */
+	unsigned long  r2;	/* Retval LS 32bits */
+	unsigned long  r3;	/* Retval MS 32bits */
+	unsigned long  r4;	/* r4-r7 Register arguments */
+	unsigned long  r5;
+	unsigned long  r6;
+	unsigned long  r7;
+	unsigned long  orig_r2;	/* Copy of r2 ?? */
+	unsigned long  ra;	/* Return address */
+	unsigned long  fp;	/* Frame pointer */
+	unsigned long  sp;	/* Stack pointer */
+	unsigned long  gp;	/* Global pointer */
+	unsigned long  estatus;
+	unsigned long  ea;	/* Exception return address (pc) */
+	unsigned long  orig_r7;
+};
+
+/*
+ * This is the extended stack used by signal handlers and the context
+ * switcher: it's pushed after the normal "struct pt_regs".
+ */
+struct switch_stack {
+	unsigned long  r16;	/* r16-r23 Callee-saved GP registers */
+	unsigned long  r17;
+	unsigned long  r18;
+	unsigned long  r19;
+	unsigned long  r20;
+	unsigned long  r21;
+	unsigned long  r22;
+	unsigned long  r23;
+	unsigned long  fp;
+	unsigned long  gp;
+	unsigned long  ra;
+};
+
 #define user_mode(regs)	(((regs)->estatus & ESTATUS_EU))
 
 #define instruction_pointer(regs)	((regs)->ra)
diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h
deleted file mode 100644
index 2c87614..0000000
--- a/arch/nios2/include/asm/ucontext.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
- * Copyright (C) 2004 Microtronix Datacom Ltd
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_NIOS2_UCONTEXT_H
-#define _ASM_NIOS2_UCONTEXT_H
-
-typedef int greg_t;
-#define NGREG 32
-typedef greg_t gregset_t[NGREG];
-
-struct mcontext {
-	int version;
-	gregset_t gregs;
-};
-
-#define MCONTEXT_VERSION 2
-
-struct ucontext {
-	unsigned long	  uc_flags;
-	struct ucontext  *uc_link;
-	stack_t		  uc_stack;
-	struct mcontext	  uc_mcontext;
-	sigset_t	  uc_sigmask;	/* mask last for extensibility */
-};
-
-#endif
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 4f07ca3f8..e0bb972 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 header-y += elf.h
-header-y += ucontext.h
+
+generic-y += ucontext.h
diff --git a/arch/nios2/include/uapi/asm/elf.h b/arch/nios2/include/uapi/asm/elf.h
index a5b91ae..6f06d3b 100644
--- a/arch/nios2/include/uapi/asm/elf.h
+++ b/arch/nios2/include/uapi/asm/elf.h
@@ -50,9 +50,7 @@
 
 typedef unsigned long elf_greg_t;
 
-#define ELF_NGREG	\
-	((sizeof(struct pt_regs) + sizeof(struct switch_stack)) /	\
-		sizeof(elf_greg_t))
+#define ELF_NGREG		49
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
 typedef unsigned long elf_fpregset_t;
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h
index e83a7c9..71a3305 100644
--- a/arch/nios2/include/uapi/asm/ptrace.h
+++ b/arch/nios2/include/uapi/asm/ptrace.h
@@ -67,53 +67,9 @@
 
 #define NUM_PTRACE_REG (PTR_TLBMISC + 1)
 
-/* this struct defines the way the registers are stored on the
-   stack during a system call.
-
-   There is a fake_regs in setup.c that has to match pt_regs.*/
-
-struct pt_regs {
-	unsigned long  r8;		/* r8-r15 Caller-saved GP registers */
-	unsigned long  r9;
-	unsigned long  r10;
-	unsigned long  r11;
-	unsigned long  r12;
-	unsigned long  r13;
-	unsigned long  r14;
-	unsigned long  r15;
-	unsigned long  r1;		/* Assembler temporary */
-	unsigned long  r2;		/* Retval LS 32bits */
-	unsigned long  r3;		/* Retval MS 32bits */
-	unsigned long  r4;		/* r4-r7 Register arguments */
-	unsigned long  r5;
-	unsigned long  r6;
-	unsigned long  r7;
-	unsigned long  orig_r2;		/* Copy of r2 ?? */
-	unsigned long  ra;		/* Return address */
-	unsigned long  fp;		/* Frame pointer */
-	unsigned long  sp;		/* Stack pointer */
-	unsigned long  gp;		/* Global pointer */
-	unsigned long  estatus;
-	unsigned long  ea;		/* Exception return address (pc) */
-	unsigned long  orig_r7;
-};
-
-/*
- * This is the extended stack used by signal handlers and the context
- * switcher: it's pushed after the normal "struct pt_regs".
- */
-struct switch_stack {
-	unsigned long  r16;		/* r16-r23 Callee-saved GP registers */
-	unsigned long  r17;
-	unsigned long  r18;
-	unsigned long  r19;
-	unsigned long  r20;
-	unsigned long  r21;
-	unsigned long  r22;
-	unsigned long  r23;
-	unsigned long  fp;
-	unsigned long  gp;
-	unsigned long  ra;
+/* User structures for general purpose registers.  */
+struct user_pt_regs {
+	__u32		regs[49];
 };
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/nios2/include/uapi/asm/sigcontext.h b/arch/nios2/include/uapi/asm/sigcontext.h
index 7b8bb41..b67944a 100644
--- a/arch/nios2/include/uapi/asm/sigcontext.h
+++ b/arch/nios2/include/uapi/asm/sigcontext.h
@@ -15,14 +15,16 @@
  * details.
  */
 
-#ifndef _ASM_NIOS2_SIGCONTEXT_H
-#define _ASM_NIOS2_SIGCONTEXT_H
+#ifndef _UAPI__ASM_SIGCONTEXT_H
+#define _UAPI__ASM_SIGCONTEXT_H
 
-#include <asm/ptrace.h>
+#include <linux/types.h>
+
+#define MCONTEXT_VERSION 2
 
 struct sigcontext {
-	struct pt_regs regs;
-	unsigned long  sc_mask;	/* old sigmask */
+	int version;
+	unsigned long gregs[32];
 };
 
 #endif
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index 2d0ea25..dda41e4 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -39,7 +39,7 @@
 					struct ucontext *uc, int *pr2)
 {
 	int temp;
-	greg_t *gregs = uc->uc_mcontext.gregs;
+	unsigned long *gregs = uc->uc_mcontext.gregs;
 	int err;
 
 	/* Always make any pending restarted system calls return -EINTR */
@@ -127,7 +127,7 @@
 static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
 {
 	struct switch_stack *sw = (struct switch_stack *)regs - 1;
-	greg_t *gregs = uc->uc_mcontext.gregs;
+	unsigned long *gregs = uc->uc_mcontext.gregs;
 	int err = 0;
 
 	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 0d231ad..0c9b6af 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -126,7 +126,6 @@
 		break;
 	}
 
-survive:
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -220,11 +219,6 @@
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (is_global_init(tsk)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 8c966b2..15207b9 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -96,6 +96,7 @@
 #if PT_NLEVELS == 3
 #define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
 #else
+#define __PAGETABLE_PMD_FOLDED
 #define BITS_PER_PMD	0
 #endif
 #define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 51866f1..ca7957b 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -142,6 +142,7 @@
 CONFIG_FSL_HV_MANAGER=y
 CONFIG_STAGING=y
 CONFIG_FSL_CORENET_CF=y
+CONFIG_CLK_QORIQ=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index d6c0c81..04737aa 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -122,6 +122,7 @@
 CONFIG_FSL_DMA=y
 CONFIG_VIRT_DRIVERS=y
 CONFIG_FSL_HV_MANAGER=y
+CONFIG_CLK_QORIQ=y
 CONFIG_FSL_CORENET_CF=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 9cfa370..f1ea597 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -113,6 +113,7 @@
 				 int pci_domain_number, unsigned long pe_num);
 extern int iommu_add_device(struct device *dev);
 extern void iommu_del_device(struct device *dev);
+extern int __init tce_iommu_bus_notifier_init(void);
 #else
 static inline void iommu_register_group(struct iommu_table *tbl,
 					int pci_domain_number,
@@ -128,6 +129,11 @@
 static inline void iommu_del_device(struct device *dev)
 {
 }
+
+static inline int __init tce_iommu_bus_notifier_init(void)
+{
+        return 0;
+}
 #endif /* !CONFIG_IOMMU_API */
 
 static inline void set_iommu_table_base_and_group(struct device *dev,
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
new file mode 100644
index 0000000..744fd54
--- /dev/null
+++ b/arch/powerpc/include/asm/irq_work.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_POWERPC_IRQ_WORK_H
+#define _ASM_POWERPC_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+	return true;
+}
+
+#endif /* _ASM_POWERPC_IRQ_WORK_H */
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 5d3968c..b054f33 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1175,4 +1175,30 @@
 }
 EXPORT_SYMBOL_GPL(iommu_del_device);
 
+static int tce_iommu_bus_notifier(struct notifier_block *nb,
+                unsigned long action, void *data)
+{
+        struct device *dev = data;
+
+        switch (action) {
+        case BUS_NOTIFY_ADD_DEVICE:
+                return iommu_add_device(dev);
+        case BUS_NOTIFY_DEL_DEVICE:
+                if (dev->iommu_group)
+                        iommu_del_device(dev);
+                return 0;
+        default:
+                return 0;
+        }
+}
+
+static struct notifier_block tce_iommu_bus_nb = {
+        .notifier_call = tce_iommu_bus_notifier,
+};
+
+int __init tce_iommu_bus_notifier_init(void)
+{
+        bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
+        return 0;
+}
 #endif /* CONFIG_IOMMU_API */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 6e19afa..ec9ec20 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -541,8 +541,8 @@
 	if (smp_ops->give_timebase)
 		smp_ops->give_timebase();
 
-	/* Wait until cpu puts itself in the online map */
-	while (!cpu_online(cpu))
+	/* Wait until cpu puts itself in the online & active maps */
+	while (!cpu_online(cpu) || !cpu_active(cpu))
 		cpu_relax();
 
 	return 0;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7316dd1..2d7b33f 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -54,6 +54,7 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/irq_work.h>
+#include <linux/clk-provider.h>
 #include <asm/trace.h>
 
 #include <asm/io.h>
@@ -975,6 +976,10 @@
 
 	init_decrementer_clockevent();
 	tick_setup_hrtimer_broadcast();
+
+#ifdef CONFIG_COMMON_CLK
+	of_clk_init(NULL);
+#endif
 }
 
 
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index 6eb614a..f691bca 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -1168,6 +1168,11 @@
 	}
 }
 
+/*
+ * The "fixed-clock" nodes (which includes the oscillator node if the board's
+ * DT provides one) has already been scanned by the of_clk_init() in
+ * time_init().
+ */
 int __init mpc5121_clk_init(void)
 {
 	struct device_node *clk_np;
@@ -1187,12 +1192,6 @@
 	mpc512x_clk_preset_data();
 
 	/*
-	 * have the device tree scanned for "fixed-clock" nodes (which
-	 * includes the oscillator node if the board's DT provides one)
-	 */
-	of_clk_init(NULL);
-
-	/*
 	 * add a dummy clock for those situations where a clock spec is
 	 * required yet no real clock is involved
 	 */
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index e69142f4..54323d6 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -836,30 +836,4 @@
 #endif
 }
 
-static int tce_iommu_bus_notifier(struct notifier_block *nb,
-		unsigned long action, void *data)
-{
-	struct device *dev = data;
-
-	switch (action) {
-	case BUS_NOTIFY_ADD_DEVICE:
-		return iommu_add_device(dev);
-	case BUS_NOTIFY_DEL_DEVICE:
-		if (dev->iommu_group)
-			iommu_del_device(dev);
-		return 0;
-	default:
-		return 0;
-	}
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
-	.notifier_call = tce_iommu_bus_notifier,
-};
-
-static int __init tce_iommu_bus_notifier_init(void)
-{
-	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
-	return 0;
-}
 machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 1d3d52d..7803a19 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1340,3 +1340,5 @@
 }
 
 __setup("multitce=", disable_multitce);
+
+machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 4c8008d..99824ff 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -74,7 +74,7 @@
 	parent = dentry->d_parent;
 	mutex_lock(&parent->d_inode->i_mutex);
 	if (hypfs_positive(dentry)) {
-		if (S_ISDIR(dentry->d_inode->i_mode))
+		if (d_is_dir(dentry))
 			simple_rmdir(parent->d_inode, dentry);
 		else
 			simple_unlink(parent->d_inode, dentry);
@@ -144,36 +144,32 @@
 	return nonseekable_open(inode, filp);
 }
 
-static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
-			      unsigned long nr_segs, loff_t offset)
+static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-	char *data;
-	ssize_t ret;
-	struct file *filp = iocb->ki_filp;
-	/* XXX: temporary */
-	char __user *buf = iov[0].iov_base;
-	size_t count = iov[0].iov_len;
+	struct file *file = iocb->ki_filp;
+	char *data = file->private_data;
+	size_t available = strlen(data);
+	loff_t pos = iocb->ki_pos;
+	size_t count;
 
-	if (nr_segs != 1)
+	if (pos < 0)
 		return -EINVAL;
-
-	data = filp->private_data;
-	ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
-	if (ret <= 0)
-		return ret;
-
-	iocb->ki_pos += ret;
-	file_accessed(filp);
-
-	return ret;
+	if (pos >= available || !iov_iter_count(to))
+		return 0;
+	count = copy_to_iter(data + pos, available - pos, to);
+	if (!count)
+		return -EFAULT;
+	iocb->ki_pos = pos + count;
+	file_accessed(file);
+	return count;
 }
-static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
-			      unsigned long nr_segs, loff_t offset)
+
+static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	int rc;
 	struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
 	struct hypfs_sb_info *fs_info = sb->s_fs_info;
-	size_t count = iov_length(iov, nr_segs);
+	size_t count = iov_iter_count(from);
 
 	/*
 	 * Currently we only allow one update per second for two reasons:
@@ -202,6 +198,7 @@
 	}
 	hypfs_update_update(sb);
 	rc = count;
+	iov_iter_advance(from, count);
 out:
 	mutex_unlock(&fs_info->lock);
 	return rc;
@@ -440,10 +437,10 @@
 static const struct file_operations hypfs_file_ops = {
 	.open		= hypfs_open,
 	.release	= hypfs_release,
-	.read		= do_sync_read,
-	.write		= do_sync_write,
-	.aio_read	= hypfs_aio_read,
-	.aio_write	= hypfs_aio_write,
+	.read		= new_sync_read,
+	.write		= new_sync_write,
+	.read_iter	= hypfs_read_iter,
+	.write_iter	= hypfs_write_iter,
 	.llseek		= no_llseek,
 };
 
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d84559e..f407bbf 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -515,15 +515,15 @@
 #define S390_ARCH_FAC_MASK_SIZE_U64 \
 	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
 
-struct s390_model_fac {
-	/* facilities used in SIE context */
-	__u64 sie[S390_ARCH_FAC_LIST_SIZE_U64];
-	/* subset enabled by kvm */
-	__u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64];
+struct kvm_s390_fac {
+	/* facility list requested by guest */
+	__u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
+	/* facility mask supported by kvm & hosting machine */
+	__u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
 };
 
 struct kvm_s390_cpu_model {
-	struct s390_model_fac *fac;
+	struct kvm_s390_fac *fac;
 	struct cpuid cpu_id;
 	unsigned short ibc;
 };
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f49b719..8fb3802 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -62,6 +62,7 @@
 {
 	int cpu = smp_processor_id();
 
+	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@
 	atomic_dec(&prev->context.attach_count);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
-	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 7b2ac6e..53eacbd 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -37,16 +37,7 @@
 #endif
 }
 
-static inline void clear_page(void *page)
-{
-	register unsigned long reg1 asm ("1") = 0;
-	register void *reg2 asm ("2") = page;
-	register unsigned long reg3 asm ("3") = 4096;
-	asm volatile(
-		"	mvcl	2,0"
-		: "+d" (reg2), "+d" (reg3) : "d" (reg1)
-		: "memory", "cc");
-}
+#define clear_page(page)	memset((page), 0, PAGE_SIZE)
 
 /*
  * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fbb5ee3..e08ec38 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -91,7 +91,9 @@
  */
 #define PTRS_PER_PTE	256
 #ifndef CONFIG_64BIT
+#define __PAGETABLE_PUD_FOLDED
 #define PTRS_PER_PMD	1
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PUD	1
 #else /* CONFIG_64BIT */
 #define PTRS_PER_PMD	2048
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index c4fbb95..b1453a2 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -18,15 +18,15 @@
 	cpumask_t book_mask;
 };
 
-extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
 
-#define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id)
-#define topology_thread_id(cpu)			(cpu_topology[cpu].thread_id)
-#define topology_thread_cpumask(cpu)		(&cpu_topology[cpu].thread_mask)
-#define topology_core_id(cpu)			(cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu)		(&cpu_topology[cpu].core_mask)
-#define topology_book_id(cpu)			(cpu_topology[cpu].book_id)
-#define topology_book_cpumask(cpu)		(&cpu_topology[cpu].book_mask)
+#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
+#define topology_thread_id(cpu)		  (per_cpu(cpu_topology, cpu).thread_id)
+#define topology_thread_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_core_id(cpu)		  (per_cpu(cpu_topology, cpu).core_id)
+#define topology_core_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).core_mask)
+#define topology_book_id(cpu)		  (per_cpu(cpu_topology, cpu).book_id)
+#define topology_book_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).book_mask)
 
 #define mc_capable() 1
 
@@ -51,14 +51,6 @@
 #define POLARIZATION_VM		(2)
 #define POLARIZATION_VH		(3)
 
-#ifdef CONFIG_SCHED_BOOK
-void s390_init_cpu_topology(void);
-#else
-static inline void s390_init_cpu_topology(void)
-{
-};
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 632fa06..0969d113 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -91,12 +91,9 @@
 {
 	if (level >= CACHE_MAX_LEVEL)
 		return CACHE_TYPE_NOCACHE;
-
 	ci += level;
-
 	if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
 		return CACHE_TYPE_NOCACHE;
-
 	return cache_type_map[ci->type];
 }
 
@@ -111,23 +108,19 @@
 }
 
 static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
-			 enum cache_type type, unsigned int level)
+			 enum cache_type type, unsigned int level, int cpu)
 {
 	int ti, num_sets;
-	int cpu = smp_processor_id();
 
 	if (type == CACHE_TYPE_INST)
 		ti = CACHE_TI_INSTRUCTION;
 	else
 		ti = CACHE_TI_UNIFIED;
-
 	this_leaf->level = level + 1;
 	this_leaf->type = type;
 	this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
-	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
-						level, ti);
+	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
 	this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
-
 	num_sets = this_leaf->size / this_leaf->coherency_line_size;
 	num_sets /= this_leaf->ways_of_associativity;
 	this_leaf->number_of_sets = num_sets;
@@ -145,7 +138,6 @@
 
 	if (!this_cpu_ci)
 		return -EINVAL;
-
 	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
 	do {
 		ctype = get_cache_type(&ct.ci[0], level);
@@ -154,34 +146,31 @@
 		/* Separate instruction and data caches */
 		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
 	} while (++level < CACHE_MAX_LEVEL);
-
 	this_cpu_ci->num_levels = level;
 	this_cpu_ci->num_leaves = leaves;
-
 	return 0;
 }
 
 int populate_cache_leaves(unsigned int cpu)
 {
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 	unsigned int level, idx, pvt;
 	union cache_topology ct;
 	enum cache_type ctype;
-	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
 	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
 	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
 	     idx < this_cpu_ci->num_leaves; idx++, level++) {
 		if (!this_leaf)
 			return -EINVAL;
-
 		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
 		ctype = get_cache_type(&ct.ci[0], level);
 		if (ctype == CACHE_TYPE_SEPARATE) {
-			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
-			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
+			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
 		} else {
-			ci_leaf_init(this_leaf++, pvt, ctype, level);
+			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
 		}
 	}
 	return 0;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 70a3294..4427ab7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -393,17 +393,19 @@
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
 	if (test_facility(129))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
-	if (test_facility(128))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
 #endif
 }
 
-static int __init nocad_setup(char *str)
+static int __init cad_setup(char *str)
 {
-	S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
+	int val;
+
+	get_option(&str, &val);
+	if (val && test_facility(128))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
 	return 0;
 }
-early_param("nocad", nocad_setup);
+early_param("cad", cad_setup);
 
 static int __init cad_init(void)
 {
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index cb2d51e..830066f 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -36,16 +36,20 @@
 	insn->offset = (entry->target - entry->code) >> 1;
 }
 
-static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+			   struct insn *new)
 {
 	unsigned char *ipc = (unsigned char *)entry->code;
-	unsigned char *ipe = (unsigned char *)insn;
+	unsigned char *ipe = (unsigned char *)expected;
+	unsigned char *ipn = (unsigned char *)new;
 
 	pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
 	pr_emerg("Found:    %02x %02x %02x %02x %02x %02x\n",
 		 ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
 	pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
 		 ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+	pr_emerg("New:      %02x %02x %02x %02x %02x %02x\n",
+		 ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
 	panic("Corrupted kernel text");
 }
 
@@ -69,10 +73,10 @@
 	}
 	if (init) {
 		if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
-			jump_label_bug(entry, &old);
+			jump_label_bug(entry, &orignop, &new);
 	} else {
 		if (memcmp((void *)entry->code, &old, sizeof(old)))
-			jump_label_bug(entry, &old);
+			jump_label_bug(entry, &old, &new);
 	}
 	probe_kernel_write((void *)entry->code, &new, sizeof(new));
 }
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 36154a2..2ca9586 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -436,6 +436,7 @@
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
+	jump_label_apply_nops(me);
 	vfree(me->arch.syminfo);
 	me->arch.syminfo = NULL;
 	return 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 2610823..dc488e1 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,7 +18,7 @@
 
 static DEFINE_PER_CPU(struct cpuid, cpu_id);
 
-void cpu_relax(void)
+void notrace cpu_relax(void)
 {
 	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
 		asm volatile("diag 0,0,0x44");
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index bfac77a..a5ea8bc 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -909,7 +909,6 @@
 	setup_lowcore();
 	smp_fill_possible_mask();
         cpu_init();
-	s390_init_cpu_topology();
 
 	/*
 	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a668993..db8f1115 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,14 +59,13 @@
 	CPU_STATE_CONFIGURED,
 };
 
+static DEFINE_PER_CPU(struct cpu *, cpu_device);
+
 struct pcpu {
-	struct cpu *cpu;
 	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
-	unsigned long async_stack;	/* async stack for the cpu */
-	unsigned long panic_stack;	/* panic stack for the cpu */
 	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
-	int state;			/* physical cpu state */
-	int polarization;		/* physical polarization */
+	signed char state;		/* physical cpu state */
+	signed char polarization;	/* physical polarization */
 	u16 address;			/* physical cpu address */
 };
 
@@ -173,25 +172,30 @@
 	pcpu_sigp_retry(pcpu, order, 0);
 }
 
+#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
+	unsigned long async_stack, panic_stack;
 	struct _lowcore *lc;
 
 	if (pcpu != &pcpu_devices[0]) {
 		pcpu->lowcore =	(struct _lowcore *)
 			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
-		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
+		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+		panic_stack = __get_free_page(GFP_KERNEL);
+		if (!pcpu->lowcore || !panic_stack || !async_stack)
 			goto out;
+	} else {
+		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
+		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
 	}
 	lc = pcpu->lowcore;
 	memcpy(lc, &S390_lowcore, 512);
 	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
-	lc->async_stack = pcpu->async_stack + ASYNC_SIZE
-		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
-		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
+	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
 	lc->cpu_nr = cpu;
 	lc->spinlock_lockval = arch_spin_lockval(cpu);
 #ifndef CONFIG_64BIT
@@ -212,8 +216,8 @@
 	return 0;
 out:
 	if (pcpu != &pcpu_devices[0]) {
-		free_page(pcpu->panic_stack);
-		free_pages(pcpu->async_stack, ASYNC_ORDER);
+		free_page(panic_stack);
+		free_pages(async_stack, ASYNC_ORDER);
 		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 	}
 	return -ENOMEM;
@@ -235,11 +239,11 @@
 #else
 	vdso_free_per_cpu(pcpu->lowcore);
 #endif
-	if (pcpu != &pcpu_devices[0]) {
-		free_page(pcpu->panic_stack);
-		free_pages(pcpu->async_stack, ASYNC_ORDER);
-		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
-	}
+	if (pcpu == &pcpu_devices[0])
+		return;
+	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
+	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
+	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -366,7 +370,8 @@
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
 	pcpu_delegate(&pcpu_devices[0], func, data,
-		      pcpu_devices->panic_stack + PAGE_SIZE);
+		      pcpu_devices->lowcore->panic_stack -
+		      PANIC_FRAME_OFFSET + PAGE_SIZE);
 }
 
 int smp_find_processor_id(u16 address)
@@ -935,10 +940,6 @@
 	pcpu->state = CPU_STATE_CONFIGURED;
 	pcpu->address = stap();
 	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
-	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
-		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
-		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 	set_cpu_present(0, true);
@@ -1078,8 +1079,7 @@
 			  void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
-	struct cpu *c = pcpu_devices[cpu].cpu;
-	struct device *s = &c->dev;
+	struct device *s = &per_cpu(cpu_device, cpu)->dev;
 	int err = 0;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
@@ -1102,7 +1102,7 @@
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
 		return -ENOMEM;
-	pcpu_devices[cpu].cpu = c;
+	per_cpu(cpu_device, cpu) = c;
 	s = &c->dev;
 	c->hotpluggable = 1;
 	rc = register_cpu(c, cpu);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 24ee33f..14da43b 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,14 +7,14 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
-#include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
@@ -42,8 +42,8 @@
 static struct mask_info socket_info;
 static struct mask_info book_info;
 
-struct cpu_topology_s390 cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
+DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -90,15 +90,15 @@
 		if (lcpu < 0)
 			continue;
 		for (i = 0; i <= smp_cpu_mtid; i++) {
-			cpu_topology[lcpu + i].book_id = book->id;
-			cpu_topology[lcpu + i].core_id = rcore;
-			cpu_topology[lcpu + i].thread_id = lcpu + i;
+			per_cpu(cpu_topology, lcpu + i).book_id = book->id;
+			per_cpu(cpu_topology, lcpu + i).core_id = rcore;
+			per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
 			cpumask_set_cpu(lcpu + i, &book->mask);
 			cpumask_set_cpu(lcpu + i, &socket->mask);
 			if (one_socket_per_cpu)
-				cpu_topology[lcpu + i].socket_id = rcore;
+				per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
 			else
-				cpu_topology[lcpu + i].socket_id = socket->id;
+				per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
 			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
 		}
 		if (one_socket_per_cpu)
@@ -249,14 +249,14 @@
 
 	spin_lock_irqsave(&topology_lock, flags);
 	for_each_possible_cpu(cpu) {
-		cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
-		cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
-		cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+		per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
+		per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
+		per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
 		if (!MACHINE_HAS_TOPOLOGY) {
-			cpu_topology[cpu].thread_id = cpu;
-			cpu_topology[cpu].core_id = cpu;
-			cpu_topology[cpu].socket_id = cpu;
-			cpu_topology[cpu].book_id = cpu;
+			per_cpu(cpu_topology, cpu).thread_id = cpu;
+			per_cpu(cpu_topology, cpu).core_id = cpu;
+			per_cpu(cpu_topology, cpu).socket_id = cpu;
+			per_cpu(cpu_topology, cpu).book_id = cpu;
 		}
 	}
 	spin_unlock_irqrestore(&topology_lock, flags);
@@ -334,50 +334,6 @@
 	set_topology_timer();
 }
 
-static int __init early_parse_topology(char *p)
-{
-	if (strncmp(p, "off", 3))
-		return 0;
-	topology_enabled = 0;
-	return 0;
-}
-early_param("topology", early_parse_topology);
-
-static void __init alloc_masks(struct sysinfo_15_1_x *info,
-			       struct mask_info *mask, int offset)
-{
-	int i, nr_masks;
-
-	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
-	for (i = 0; i < info->mnest - offset; i++)
-		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
-	nr_masks = max(nr_masks, 1);
-	for (i = 0; i < nr_masks; i++) {
-		mask->next = alloc_bootmem_align(
-			roundup_pow_of_two(sizeof(struct mask_info)),
-			roundup_pow_of_two(sizeof(struct mask_info)));
-		mask = mask->next;
-	}
-}
-
-void __init s390_init_cpu_topology(void)
-{
-	struct sysinfo_15_1_x *info;
-	int i;
-
-	if (!MACHINE_HAS_TOPOLOGY)
-		return;
-	tl_info = alloc_bootmem_pages(PAGE_SIZE);
-	info = tl_info;
-	store_topology(info);
-	pr_info("The CPU configuration topology of the machine is:");
-	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-		printk(KERN_CONT " %d", info->mag[i]);
-	printk(KERN_CONT " / %d\n", info->mnest);
-	alloc_masks(info, &socket_info, 1);
-	alloc_masks(info, &book_info, 2);
-}
-
 static int cpu_management;
 
 static ssize_t dispatching_show(struct device *dev,
@@ -467,20 +423,29 @@
 
 const struct cpumask *cpu_thread_mask(int cpu)
 {
-	return &cpu_topology[cpu].thread_mask;
+	return &per_cpu(cpu_topology, cpu).thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	return &cpu_topology[cpu].core_mask;
+	return &per_cpu(cpu_topology, cpu).core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-	return &cpu_topology[cpu].book_mask;
+	return &per_cpu(cpu_topology, cpu).book_mask;
 }
 
+static int __init early_parse_topology(char *p)
+{
+	if (strncmp(p, "off", 3))
+		return 0;
+	topology_enabled = 0;
+	return 0;
+}
+early_param("topology", early_parse_topology);
+
 static struct sched_domain_topology_level s390_topology[] = {
 	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
@@ -489,6 +454,42 @@
 	{ NULL, },
 };
 
+static void __init alloc_masks(struct sysinfo_15_1_x *info,
+			       struct mask_info *mask, int offset)
+{
+	int i, nr_masks;
+
+	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
+	for (i = 0; i < info->mnest - offset; i++)
+		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
+	nr_masks = max(nr_masks, 1);
+	for (i = 0; i < nr_masks; i++) {
+		mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+		mask = mask->next;
+	}
+}
+
+static int __init s390_topology_init(void)
+{
+	struct sysinfo_15_1_x *info;
+	int i;
+
+	if (!MACHINE_HAS_TOPOLOGY)
+		return 0;
+	tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+	info = tl_info;
+	store_topology(info);
+	pr_info("The CPU configuration topology of the machine is:");
+	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
+		printk(KERN_CONT " %d", info->mag[i]);
+	printk(KERN_CONT " / %d\n", info->mnest);
+	alloc_masks(info, &socket_info, 1);
+	alloc_masks(info, &book_info, 2);
+	set_sched_topology(s390_topology);
+	return 0;
+}
+early_initcall(s390_topology_init);
+
 static int __init topology_init(void)
 {
 	if (MACHINE_HAS_TOPOLOGY)
@@ -498,10 +499,3 @@
 	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
-
-static int __init early_topology_init(void)
-{
-	set_sched_topology(s390_topology);
-	return 0;
-}
-early_initcall(early_topology_init);
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 7699e73..61541fb 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -25,9 +25,7 @@
 	je	4f
 	cghi	%r2,__CLOCK_REALTIME
 	je	5f
-	cghi	%r2,__CLOCK_THREAD_CPUTIME_ID
-	je	9f
-	cghi	%r2,-2		/* Per-thread CPUCLOCK with PID=0, VIRT=1 */
+	cghi	%r2,-3		/* Per-thread CPUCLOCK with PID=0, VIRT=1 */
 	je	9f
 	cghi	%r2,__CLOCK_MONOTONIC_COARSE
 	je	3f
@@ -106,7 +104,7 @@
 	aghi	%r15,16
 	br	%r14
 
-	/* CLOCK_THREAD_CPUTIME_ID for this thread */
+	/* CPUCLOCK_VIRT for this thread */
 9:	icm	%r0,15,__VDSO_ECTG_OK(%r5)
 	jz	12f
 	ear	%r2,%a4
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0c36239..19e17bd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -165,7 +165,6 @@
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_ENABLE_CAP:
 	case KVM_CAP_S390_CSS_SUPPORT:
-	case KVM_CAP_IRQFD:
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
 	case KVM_CAP_ENABLE_CAP_VM:
@@ -522,7 +521,7 @@
 		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
 		       sizeof(struct cpuid));
 		kvm->arch.model.ibc = proc->ibc;
-		memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
+		memcpy(kvm->arch.model.fac->list, proc->fac_list,
 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	} else
 		ret = -EFAULT;
@@ -556,7 +555,7 @@
 	}
 	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
 	proc->ibc = kvm->arch.model.ibc;
-	memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
+	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
 		ret = -EFAULT;
 	kfree(proc);
@@ -576,10 +575,10 @@
 	}
 	get_cpu_id((struct cpuid *) &mach->cpuid);
 	mach->ibc = sclp_get_ibc();
-	memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
-	       kvm_s390_fac_list_mask_size() * sizeof(u64));
+	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_U64);
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
 		ret = -EFAULT;
 	kfree(mach);
@@ -778,15 +777,18 @@
 static int kvm_s390_query_ap_config(u8 *config)
 {
 	u32 fcn_code = 0x04000000UL;
-	u32 cc;
+	u32 cc = 0;
 
+	memset(config, 0, 128);
 	asm volatile(
 		"lgr 0,%1\n"
 		"lgr 2,%2\n"
 		".long 0xb2af0000\n"		/* PQAP(QCI) */
-		"ipm %0\n"
+		"0: ipm %0\n"
 		"srl %0,28\n"
-		: "=r" (cc)
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+r" (cc)
 		: "r" (fcn_code), "r" (config)
 		: "cc", "0", "2", "memory"
 	);
@@ -839,9 +841,13 @@
 
 	kvm_s390_set_crycb_format(kvm);
 
-	/* Disable AES/DEA protected key functions by default */
-	kvm->arch.crypto.aes_kw = 0;
-	kvm->arch.crypto.dea_kw = 0;
+	/* Enable AES/DEA protected key functions by default */
+	kvm->arch.crypto.aes_kw = 1;
+	kvm->arch.crypto.dea_kw = 1;
+	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
 
 	return 0;
 }
@@ -886,40 +892,29 @@
 	/*
 	 * The architectural maximum amount of facilities is 16 kbit. To store
 	 * this amount, 2 kbyte of memory is required. Thus we need a full
-	 * page to hold the active copy (arch.model.fac->sie) and the current
-	 * facilities set (arch.model.fac->kvm). Its address size has to be
+	 * page to hold the guest facility list (arch.model.fac->list) and the
+	 * facility mask (arch.model.fac->mask). Its address size has to be
 	 * 31 bits and word aligned.
 	 */
 	kvm->arch.model.fac =
-		(struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!kvm->arch.model.fac)
 		goto out_nofac;
 
-	memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_U64);
-
-	/*
-	 * If this KVM host runs *not* in a LPAR, relax the facility bits
-	 * of the kvm facility mask by all missing facilities. This will allow
-	 * to determine the right CPU model by means of the remaining facilities.
-	 * Live guest migration must prohibit the migration of KVMs running in
-	 * a LPAR to non LPAR hosts.
-	 */
-	if (!MACHINE_IS_LPAR)
-		for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
-			kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
-
-	/*
-	 * Apply the kvm facility mask to limit the kvm supported/tolerated
-	 * facility list.
-	 */
+	/* Populate the facility mask initially. */
+	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
 		if (i < kvm_s390_fac_list_mask_size())
-			kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
+			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
 		else
-			kvm->arch.model.fac->kvm[i] = 0UL;
+			kvm->arch.model.fac->mask[i] = 0UL;
 	}
 
+	/* Populate the facility list initially. */
+	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+
 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
 	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
 
@@ -1165,8 +1160,6 @@
 
 	mutex_lock(&vcpu->kvm->lock);
 	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
-	memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
 	mutex_unlock(&vcpu->kvm->lock);
 
@@ -1212,7 +1205,7 @@
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
 	}
-	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
+	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
 
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 985c211..c34109a 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -128,7 +128,8 @@
 /* test availability of facility in a kvm intance */
 static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
 {
-	return __test_facility(nr, kvm->arch.model.fac->kvm);
+	return __test_facility(nr, kvm->arch.model.fac->mask) &&
+		__test_facility(nr, kvm->arch.model.fac->list);
 }
 
 /* are cpu states controlled by user space */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index bdd9b5b..3511169 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -348,7 +348,7 @@
 	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
 	 * into a u32 memory representation. They will remain bits 0-31.
 	 */
-	fac = *vcpu->kvm->arch.model.fac->sie >> 32;
+	fac = *vcpu->kvm->arch.model.fac->list >> 32;
 	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
 			    &fac, sizeof(fac));
 	if (rc)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index d008f638..179a2c2 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -183,7 +183,10 @@
 {
 	unsigned long base;
 
-	base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+	base = STACK_TOP / 3 * 2;
+	if (!is_32bit_task())
+		/* Align to 4GB */
+		base &= ~((1UL << 32) - 1);
 	return base + mmap_rnd();
 }
 
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 753a567..f0b8544 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -287,7 +287,7 @@
 	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
 	return (void __iomem *) addr + offset;
 }
-EXPORT_SYMBOL_GPL(pci_iomap_range);
+EXPORT_SYMBOL(pci_iomap_range);
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
@@ -309,7 +309,7 @@
 	}
 	spin_unlock(&zpci_iomap_lock);
 }
-EXPORT_SYMBOL_GPL(pci_iounmap);
+EXPORT_SYMBOL(pci_iounmap);
 
 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
 		    int size, u32 *val)
@@ -483,9 +483,8 @@
 	airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
 }
 
-static void zpci_map_resources(struct zpci_dev *zdev)
+static void zpci_map_resources(struct pci_dev *pdev)
 {
-	struct pci_dev *pdev = zdev->pdev;
 	resource_size_t len;
 	int i;
 
@@ -499,9 +498,8 @@
 	}
 }
 
-static void zpci_unmap_resources(struct zpci_dev *zdev)
+static void zpci_unmap_resources(struct pci_dev *pdev)
 {
-	struct pci_dev *pdev = zdev->pdev;
 	resource_size_t len;
 	int i;
 
@@ -651,7 +649,7 @@
 
 	zdev->pdev = pdev;
 	pdev->dev.groups = zpci_attr_groups;
-	zpci_map_resources(zdev);
+	zpci_map_resources(pdev);
 
 	for (i = 0; i < PCI_BAR_COUNT; i++) {
 		res = &pdev->resource[i];
@@ -663,6 +661,11 @@
 	return 0;
 }
 
+void pcibios_release_device(struct pci_dev *pdev)
+{
+	zpci_unmap_resources(pdev);
+}
+
 int pcibios_enable_device(struct pci_dev *pdev, int mask)
 {
 	struct zpci_dev *zdev = get_zdev(pdev);
@@ -670,7 +673,6 @@
 	zdev->pdev = pdev;
 	zpci_debug_init_device(zdev);
 	zpci_fmb_enable_device(zdev);
-	zpci_map_resources(zdev);
 
 	return pci_enable_resources(pdev, mask);
 }
@@ -679,7 +681,6 @@
 {
 	struct zpci_dev *zdev = get_zdev(pdev);
 
-	zpci_unmap_resources(zdev);
 	zpci_fmb_disable_device(zdev);
 	zpci_debug_exit_device(zdev);
 	zdev->pdev = NULL;
@@ -688,7 +689,8 @@
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 static int zpci_restore(struct device *dev)
 {
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct zpci_dev *zdev = get_zdev(pdev);
 	int ret = 0;
 
 	if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -698,7 +700,7 @@
 	if (ret)
 		goto out;
 
-	zpci_map_resources(zdev);
+	zpci_map_resources(pdev);
 	zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
 			   zdev->start_dma + zdev->iommu_size - 1,
 			   (u64) zdev->dma_table);
@@ -709,12 +711,14 @@
 
 static int zpci_freeze(struct device *dev)
 {
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct zpci_dev *zdev = get_zdev(pdev);
 
 	if (zdev->state != ZPCI_FN_STATE_ONLINE)
 		return 0;
 
 	zpci_unregister_ioat(zdev, 0);
+	zpci_unmap_resources(pdev);
 	return clp_disable_fh(zdev);
 }
 
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 8aa271b..b1bb2b7 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -64,8 +64,7 @@
 	if (copy_from_user(buf, user_buffer, length))
 		goto out;
 
-	memcpy_toio(io_addr, buf, length);
-	ret = 0;
+	ret = zpci_memcpy_toio(io_addr, buf, length);
 out:
 	if (buf != local_buf)
 		kfree(buf);
@@ -98,16 +97,16 @@
 		goto out;
 	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
 
-	ret = -EFAULT;
-	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
+		ret = -EFAULT;
 		goto out;
-
-	memcpy_fromio(buf, io_addr, length);
-
+	}
+	ret = zpci_memcpy_fromio(buf, io_addr, length);
+	if (ret)
+		goto out;
 	if (copy_to_user(user_buffer, buf, length))
-		goto out;
+		ret = -EFAULT;
 
-	ret = 0;
 out:
 	if (buf != local_buf)
 		kfree(buf);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 96ac69c..efb00ec 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -86,6 +86,9 @@
 	default "arch/sparc/configs/sparc32_defconfig" if SPARC32
 	default "arch/sparc/configs/sparc64_defconfig" if SPARC64
 
+config ARCH_PROC_KCORE_TEXT
+	def_bool y
+
 config IOMMU_HELPER
 	bool
 	default y if SPARC64
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9b672be..50d4840 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -407,16 +407,16 @@
 {
 }
 
-#define ioread8(X)			readb(X)
-#define ioread16(X)			readw(X)
-#define ioread16be(X)			__raw_readw(X)
-#define ioread32(X)			readl(X)
-#define ioread32be(X)			__raw_readl(X)
-#define iowrite8(val,X)			writeb(val,X)
-#define iowrite16(val,X)		writew(val,X)
-#define iowrite16be(val,X)		__raw_writew(val,X)
-#define iowrite32(val,X)		writel(val,X)
-#define iowrite32be(val,X)		__raw_writel(val,X)
+#define ioread8			readb
+#define ioread16		readw
+#define ioread16be		__raw_readw
+#define ioread32		readl
+#define ioread32be		__raw_readl
+#define iowrite8		writeb
+#define iowrite16		writew
+#define iowrite16be		__raw_writew
+#define iowrite32		writel
+#define iowrite32be		__raw_writel
 
 /* Create a virtual mapping cookie for an IO port range */
 void __iomem *ioport_map(unsigned long port, unsigned int nr);
diff --git a/arch/sparc/include/asm/starfire.h b/arch/sparc/include/asm/starfire.h
index c100dc2..176fa0a 100644
--- a/arch/sparc/include/asm/starfire.h
+++ b/arch/sparc/include/asm/starfire.h
@@ -12,7 +12,6 @@
 extern int this_is_starfire;
 
 void check_if_starfire(void);
-int starfire_hard_smp_processor_id(void);
 void starfire_hookup(int);
 unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
 
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 88d322b..07cc49e5 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -98,11 +98,7 @@
 void do_privop(struct pt_regs *regs);
 void do_privact(struct pt_regs *regs);
 void do_cee(struct pt_regs *regs);
-void do_cee_tl1(struct pt_regs *regs);
-void do_dae_tl1(struct pt_regs *regs);
-void do_iae_tl1(struct pt_regs *regs);
 void do_div0_tl1(struct pt_regs *regs);
-void do_fpdis_tl1(struct pt_regs *regs);
 void do_fpieee_tl1(struct pt_regs *regs);
 void do_fpother_tl1(struct pt_regs *regs);
 void do_ill_tl1(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index da6f1a7..61139d9 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1406,11 +1406,32 @@
 	scheduler_ipi();
 }
 
-/* This is a nop because we capture all other cpus
- * anyways when making the PROM active.
- */
+static void stop_this_cpu(void *dummy)
+{
+	prom_stopself();
+}
+
 void smp_send_stop(void)
 {
+	int cpu;
+
+	if (tlb_type == hypervisor) {
+		for_each_online_cpu(cpu) {
+			if (cpu == smp_processor_id())
+				continue;
+#ifdef CONFIG_SUN_LDOMS
+			if (ldom_domaining_enabled) {
+				unsigned long hv_err;
+				hv_err = sun4v_cpu_stop(cpu);
+				if (hv_err)
+					printk(KERN_ERR "sun4v_cpu_stop() "
+					       "failed err=%lu\n", hv_err);
+			} else
+#endif
+				prom_stopcpu_cpuid(cpu);
+		}
+	} else
+		smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 /**
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c
index 82281a5..167fdfd 100644
--- a/arch/sparc/kernel/starfire.c
+++ b/arch/sparc/kernel/starfire.c
@@ -28,11 +28,6 @@
 		this_is_starfire = 1;
 }
 
-int starfire_hard_smp_processor_id(void)
-{
-	return upa_readl(0x1fff40000d0UL);
-}
-
 /*
  * Each Starfire board has 32 registers which perform translation
  * and delivery of traditional interrupt packets into the extended
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c85403d..30e7ddb 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -333,7 +333,7 @@
 	long err;
 
 	/* No need for backward compatibility. We can start fresh... */
-	if (call <= SEMCTL) {
+	if (call <= SEMTIMEDOP) {
 		switch (call) {
 		case SEMOP:
 			err = sys_semtimedop(first, ptr,
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index a27651e..0e69974 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2427,6 +2427,8 @@
 		}
 		user_instruction_dump ((unsigned int __user *) regs->tpc);
 	}
+	if (panic_on_oops)
+		panic("Fatal exception");
 	if (regs->tstate & TSTATE_PRIV)
 		do_exit(SIGKILL);
 	do_exit(SIGSEGV);
@@ -2564,27 +2566,6 @@
 	die_if_kernel("TL0: Cache Error Exception", regs);
 }
 
-void do_cee_tl1(struct pt_regs *regs)
-{
-	exception_enter();
-	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-	die_if_kernel("TL1: Cache Error Exception", regs);
-}
-
-void do_dae_tl1(struct pt_regs *regs)
-{
-	exception_enter();
-	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-	die_if_kernel("TL1: Data Access Exception", regs);
-}
-
-void do_iae_tl1(struct pt_regs *regs)
-{
-	exception_enter();
-	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-	die_if_kernel("TL1: Instruction Access Exception", regs);
-}
-
 void do_div0_tl1(struct pt_regs *regs)
 {
 	exception_enter();
@@ -2592,13 +2573,6 @@
 	die_if_kernel("TL1: DIV0 Exception", regs);
 }
 
-void do_fpdis_tl1(struct pt_regs *regs)
-{
-	exception_enter();
-	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-	die_if_kernel("TL1: FPU Disabled", regs);
-}
-
 void do_fpieee_tl1(struct pt_regs *regs)
 {
 	exception_enter();
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 3ea267c..4ca0d6b 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2820,7 +2820,7 @@
 
 	return 0;
 }
-device_initcall(report_memory);
+arch_initcall(report_memory);
 
 #ifdef CONFIG_SMP
 #define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index eb1cf89..b7d31ca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -488,6 +488,23 @@
 	  Intel MID platforms are based on an Intel processor and chipset which
 	  consume less power than most of the x86 derivatives.
 
+config X86_INTEL_QUARK
+	bool "Intel Quark platform support"
+	depends on X86_32
+	depends on X86_EXTENDED_PLATFORM
+	depends on X86_PLATFORM_DEVICES
+	depends on X86_TSC
+	depends on PCI
+	depends on PCI_GOANY
+	depends on X86_IO_APIC
+	select IOSF_MBI
+	select INTEL_IMR
+	select COMMON_CLK
+	---help---
+	  Select to include support for Quark X1000 SoC.
+	  Say Y here if you have a Quark based system such as the Arduino
+	  compatible Intel Galileo.
+
 config X86_INTEL_LPSS
 	bool "Intel Low Power Subsystem Support"
 	depends on ACPI
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 61bd2ad..20028da 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -313,6 +313,19 @@
 
 	  If unsure, say N.
 
+config DEBUG_IMR_SELFTEST
+	bool "Isolated Memory Region self test"
+	default n
+	depends on INTEL_IMR
+	---help---
+	  This option enables automated sanity testing of the IMR code.
+	  Some simple tests are run to verify IMR bounds checking, alignment
+	  and overlapping. This option is really only useful if you are
+	  debugging an IMR memory map or are modifying the IMR code and want to
+	  test your changes.
+
+	  If unsure say N here.
+
 config X86_DEBUG_STATIC_CPU_HAS
 	bool "Debug alternatives"
 	depends on DEBUG_KERNEL
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 843feb3..0a291cd 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -51,6 +51,7 @@
 
 vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
 	$(objtree)/drivers/firmware/efi/libstub/lib.a
+vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
 	$(call if_changed,ld)
diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S
index 7ff3632..99494df 100644
--- a/arch/x86/boot/compressed/efi_stub_64.S
+++ b/arch/x86/boot/compressed/efi_stub_64.S
@@ -3,28 +3,3 @@
 #include <asm/processor-flags.h>
 
 #include "../../platform/efi/efi_stub_64.S"
-
-#ifdef CONFIG_EFI_MIXED
-	.code64
-	.text
-ENTRY(efi64_thunk)
-	push	%rbp
-	push	%rbx
-
-	subq	$16, %rsp
-	leaq	efi_exit32(%rip), %rax
-	movl	%eax, 8(%rsp)
-	leaq	efi_gdt64(%rip), %rax
-	movl	%eax, 4(%rsp)
-	movl	%eax, 2(%rax)		/* Fixup the gdt base address */
-	leaq	efi32_boot_gdt(%rip), %rax
-	movl	%eax, (%rsp)
-
-	call	__efi64_thunk
-
-	addq	$16, %rsp
-	pop	%rbx
-	pop	%rbp
-	ret
-ENDPROC(efi64_thunk)
-#endif /* CONFIG_EFI_MIXED */
diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
new file mode 100644
index 0000000..630384a
--- /dev/null
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
+ *
+ * Early support for invoking 32-bit EFI services from a 64-bit kernel.
+ *
+ * Because this thunking occurs before ExitBootServices() we have to
+ * restore the firmware's 32-bit GDT before we make EFI service calls,
+ * since the firmware's 32-bit IDT is still currently installed and it
+ * needs to be able to service interrupts.
+ *
+ * On the plus side, we don't have to worry about mangling 64-bit
+ * addresses into 32-bits because we're executing with an identity
+ * mapped pagetable and haven't transitioned to 64-bit virtual addresses
+ * yet.
+ */
+
+#include <linux/linkage.h>
+#include <asm/msr.h>
+#include <asm/page_types.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>
+
+	.code64
+	.text
+ENTRY(efi64_thunk)
+	push	%rbp
+	push	%rbx
+
+	subq	$8, %rsp
+	leaq	efi_exit32(%rip), %rax
+	movl	%eax, 4(%rsp)
+	leaq	efi_gdt64(%rip), %rax
+	movl	%eax, (%rsp)
+	movl	%eax, 2(%rax)		/* Fixup the gdt base address */
+
+	movl	%ds, %eax
+	push	%rax
+	movl	%es, %eax
+	push	%rax
+	movl	%ss, %eax
+	push	%rax
+
+	/*
+	 * Convert x86-64 ABI params to i386 ABI
+	 */
+	subq	$32, %rsp
+	movl	%esi, 0x0(%rsp)
+	movl	%edx, 0x4(%rsp)
+	movl	%ecx, 0x8(%rsp)
+	movq	%r8, %rsi
+	movl	%esi, 0xc(%rsp)
+	movq	%r9, %rsi
+	movl	%esi,  0x10(%rsp)
+
+	sgdt	save_gdt(%rip)
+
+	leaq	1f(%rip), %rbx
+	movq	%rbx, func_rt_ptr(%rip)
+
+	/*
+	 * Switch to gdt with 32-bit segments. This is the firmware GDT
+	 * that was installed when the kernel started executing. This
+	 * pointer was saved at the EFI stub entry point in head_64.S.
+	 */
+	leaq	efi32_boot_gdt(%rip), %rax
+	lgdt	(%rax)
+
+	pushq	$__KERNEL_CS
+	leaq	efi_enter32(%rip), %rax
+	pushq	%rax
+	lretq
+
+1:	addq	$32, %rsp
+
+	lgdt	save_gdt(%rip)
+
+	pop	%rbx
+	movl	%ebx, %ss
+	pop	%rbx
+	movl	%ebx, %es
+	pop	%rbx
+	movl	%ebx, %ds
+
+	/*
+	 * Convert 32-bit status code into 64-bit.
+	 */
+	test	%rax, %rax
+	jz	1f
+	movl	%eax, %ecx
+	andl	$0x0fffffff, %ecx
+	andl	$0xf0000000, %eax
+	shl	$32, %rax
+	or	%rcx, %rax
+1:
+	addq	$8, %rsp
+	pop	%rbx
+	pop	%rbp
+	ret
+ENDPROC(efi64_thunk)
+
+ENTRY(efi_exit32)
+	movq	func_rt_ptr(%rip), %rax
+	push	%rax
+	mov	%rdi, %rax
+	ret
+ENDPROC(efi_exit32)
+
+	.code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+	movl	$__KERNEL_DS, %eax
+	movl	%eax, %ds
+	movl	%eax, %es
+	movl	%eax, %ss
+
+	/* Reload pgtables */
+	movl	%cr3, %eax
+	movl	%eax, %cr3
+
+	/* Disable paging */
+	movl	%cr0, %eax
+	btrl	$X86_CR0_PG_BIT, %eax
+	movl	%eax, %cr0
+
+	/* Disable long mode via EFER */
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	btrl	$_EFER_LME, %eax
+	wrmsr
+
+	call	*%edi
+
+	/* We must preserve return value */
+	movl	%eax, %edi
+
+	/*
+	 * Some firmware will return with interrupts enabled. Be sure to
+	 * disable them before we switch GDTs.
+	 */
+	cli
+
+	movl	56(%esp), %eax
+	movl	%eax, 2(%eax)
+	lgdtl	(%eax)
+
+	movl	%cr4, %eax
+	btsl	$(X86_CR4_PAE_BIT), %eax
+	movl	%eax, %cr4
+
+	movl	%cr3, %eax
+	movl	%eax, %cr3
+
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	btsl	$_EFER_LME, %eax
+	wrmsr
+
+	xorl	%eax, %eax
+	lldt	%ax
+
+	movl	60(%esp), %eax
+	pushl	$__KERNEL_CS
+	pushl	%eax
+
+	/* Enable paging */
+	movl	%cr0, %eax
+	btsl	$X86_CR0_PG_BIT, %eax
+	movl	%eax, %cr0
+	lret
+ENDPROC(efi_enter32)
+
+	.data
+	.balign	8
+	.global	efi32_boot_gdt
+efi32_boot_gdt:	.word	0
+		.quad	0
+
+save_gdt:	.word	0
+		.quad	0
+func_rt_ptr:	.quad	0
+
+	.global efi_gdt64
+efi_gdt64:
+	.word	efi_gdt64_end - efi_gdt64
+	.long	0			/* Filled out by user */
+	.word	0
+	.quad	0x0000000000000000	/* NULL descriptor */
+	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
+	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
+	.quad	0x0080890000000000	/* TS descriptor */
+	.quad   0x0000000000000000	/* TS continued */
+efi_gdt64_end:
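
The status conversion near the end of efi64_thunk above (keep bits 27:0, move the top nibble up by 32) is easier to follow in C. The sketch below is illustrative only; efi32_status_to_64() is a made-up name, not a function added by this patch, and the sample status value is an assumption:

#include <stdint.h>
#include <stdio.h>

/* Widen a 32-bit EFI status: the error/warning bits move from 31:28 to 63:60. */
static uint64_t efi32_status_to_64(uint32_t status32)
{
	uint64_t low  = status32 & 0x0fffffffu;				/* status code bits 27:0 */
	uint64_t high = (uint64_t)(status32 & 0xf0000000u) << 32;	/* flag nibble -> 63:60 */

	return high | low;
}

int main(void)
{
	/* 0x80000003 is EFI_UNSUPPORTED in its 32-bit form (assumed value). */
	printf("%#llx\n", (unsigned long long)efi32_status_to_64(0x80000003u));
	/* prints 0x8000000000000003, the 64-bit form of the same status */
	return 0;
}
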
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 947c6bf..54f60ab 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1155,7 +1155,7 @@
 		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
 		if (!src)
 			return -ENOMEM;
-		assoc = (src + req->cryptlen + auth_tag_len);
+		assoc = (src + req->cryptlen);
 		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
 		scatterwalk_map_and_copy(assoc, req->assoc, 0,
 			req->assoclen, 0);
@@ -1180,7 +1180,7 @@
 		scatterwalk_done(&src_sg_walk, 0, 0);
 		scatterwalk_done(&assoc_sg_walk, 0, 0);
 	} else {
-		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
+		scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
 		kfree(src);
 	}
 	return retval;
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 92003f3..efc3b22 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -213,7 +213,15 @@
 extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
 extern int APIC_init_uniprocessor(void);
+
+#ifdef CONFIG_X86_64
+static inline int apic_force_enable(unsigned long addr)
+{
+	return -1;
+}
+#else
 extern int apic_force_enable(unsigned long addr);
+#endif
 
 extern int apic_bsp_setup(bool upmode);
 extern void apic_ap_setup(void);
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 0dbc082..72ba21a 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -370,7 +370,7 @@
 	preempt_disable();
 	tsk->thread.fpu_counter = 0;
 	__drop_fpu(tsk);
-	clear_used_math();
+	clear_stopped_child_used_math(tsk);
 	preempt_enable();
 }
 
diff --git a/arch/x86/include/asm/imr.h b/arch/x86/include/asm/imr.h
new file mode 100644
index 0000000..cd2ce40
--- /dev/null
+++ b/arch/x86/include/asm/imr.h
@@ -0,0 +1,60 @@
+/*
+ * imr.h: Isolated Memory Region API
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _IMR_H
+#define _IMR_H
+
+#include <linux/types.h>
+
+/*
+ * IMR agent access mask bits
+ * See section 12.7.4.7 from quark-x1000-datasheet.pdf for register
+ * definitions.
+ */
+#define IMR_ESRAM_FLUSH		BIT(31)
+#define IMR_CPU_SNOOP		BIT(30)		/* Applicable only to write */
+#define IMR_RMU			BIT(29)
+#define IMR_VC1_SAI_ID3		BIT(15)
+#define IMR_VC1_SAI_ID2		BIT(14)
+#define IMR_VC1_SAI_ID1		BIT(13)
+#define IMR_VC1_SAI_ID0		BIT(12)
+#define IMR_VC0_SAI_ID3		BIT(11)
+#define IMR_VC0_SAI_ID2		BIT(10)
+#define IMR_VC0_SAI_ID1		BIT(9)
+#define IMR_VC0_SAI_ID0		BIT(8)
+#define IMR_CPU_0		BIT(1)		/* SMM mode */
+#define IMR_CPU			BIT(0)		/* Non SMM mode */
+#define IMR_ACCESS_NONE		0
+
+/*
+ * Read/Write access-all bits here include some reserved bits
+ * These are the values firmware uses and are accepted by hardware.
+ * The kernel defines read/write access-all in the same way as firmware
+ * in order to have a consistent and crisp definition across firmware,
+ * bootloader and kernel.
+ */
+#define IMR_READ_ACCESS_ALL	0xBFFFFFFF
+#define IMR_WRITE_ACCESS_ALL	0xFFFFFFFF
+
+/* Number of IMRs provided by Quark X1000 SoC */
+#define QUARK_X1000_IMR_MAX	0x08
+#define QUARK_X1000_IMR_REGBASE 0x40
+
+/* IMR alignment bits - only bits 31:10 are checked for IMR validity */
+#define IMR_ALIGN		0x400
+#define IMR_MASK		(IMR_ALIGN - 1)
+
+int imr_add_range(phys_addr_t base, size_t size,
+		  unsigned int rmask, unsigned int wmask, bool lock);
+
+int imr_remove_range(phys_addr_t base, size_t size);
+
+#endif /* _IMR_H */
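
For context, here is a minimal sketch of how a kernel-side caller might use the API declared above, assuming a 1 KiB-aligned physical region; the base address and the example_protect_region() name are placeholders for the illustration, not taken from the patch:

#include <asm/imr.h>

static int __init example_protect_region(void)
{
	phys_addr_t base = 0x12340000;	/* placeholder address, IMR_ALIGN aligned */
	size_t size = 0x10000;		/* 64 KiB, also a multiple of IMR_ALIGN */
	int ret;

	/* Allow only non-SMM CPU accesses; do not lock the IMR permanently. */
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
	if (ret)
		return ret;

	/* ... use the protected region ... */

	return imr_remove_range(base, size);
}
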
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 7050d86..cf87de3 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -46,7 +46,7 @@
 
 static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
 {
-	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+	set_bit(0, (volatile unsigned long *)&lock->tickets.head);
 }
 
 #else  /* !CONFIG_PARAVIRT_SPINLOCKS */
@@ -60,10 +60,30 @@
 }
 
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
+static inline int  __tickets_equal(__ticket_t one, __ticket_t two)
+{
+	return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
+}
+
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
+							__ticket_t head)
+{
+	if (head & TICKET_SLOWPATH_FLAG) {
+		arch_spinlock_t old, new;
+
+		old.tickets.head = head;
+		new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
+		old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
+		new.tickets.tail = old.tickets.tail;
+
+		/* try to clear slowpath flag when there are no contenders */
+		cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
+	}
+}
 
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-	return lock.tickets.head == lock.tickets.tail;
+	return __tickets_equal(lock.tickets.head, lock.tickets.tail);
 }
 
 /*
@@ -87,18 +107,21 @@
 	if (likely(inc.head == inc.tail))
 		goto out;
 
-	inc.tail &= ~TICKET_SLOWPATH_FLAG;
 	for (;;) {
 		unsigned count = SPIN_THRESHOLD;
 
 		do {
-			if (READ_ONCE(lock->tickets.head) == inc.tail)
-				goto out;
+			inc.head = READ_ONCE(lock->tickets.head);
+			if (__tickets_equal(inc.head, inc.tail))
+				goto clear_slowpath;
 			cpu_relax();
 		} while (--count);
 		__ticket_lock_spinning(lock, inc.tail);
 	}
-out:	barrier();	/* make sure nothing creeps before the lock is taken */
+clear_slowpath:
+	__ticket_check_and_clear_slowpath(lock, inc.head);
+out:
+	barrier();	/* make sure nothing creeps before the lock is taken */
 }
 
 static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -106,56 +129,30 @@
 	arch_spinlock_t old, new;
 
 	old.tickets = READ_ONCE(lock->tickets);
-	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
+	if (!__tickets_equal(old.tickets.head, old.tickets.tail))
 		return 0;
 
 	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
+	new.head_tail &= ~TICKET_SLOWPATH_FLAG;
 
 	/* cmpxchg is a full barrier, so nothing can move before it */
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
-					    arch_spinlock_t old)
-{
-	arch_spinlock_t new;
-
-	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
-
-	/* Perform the unlock on the "before" copy */
-	old.tickets.head += TICKET_LOCK_INC;
-
-	/* Clear the slowpath flag */
-	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
-
-	/*
-	 * If the lock is uncontended, clear the flag - use cmpxchg in
-	 * case it changes behind our back though.
-	 */
-	if (new.tickets.head != new.tickets.tail ||
-	    cmpxchg(&lock->head_tail, old.head_tail,
-					new.head_tail) != old.head_tail) {
-		/*
-		 * Lock still has someone queued for it, so wake up an
-		 * appropriate waiter.
-		 */
-		__ticket_unlock_kick(lock, old.tickets.head);
-	}
-}
-
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	if (TICKET_SLOWPATH_FLAG &&
-	    static_key_false(&paravirt_ticketlocks_enabled)) {
-		arch_spinlock_t prev;
+		static_key_false(&paravirt_ticketlocks_enabled)) {
+		__ticket_t head;
 
-		prev = *lock;
-		add_smp(&lock->tickets.head, TICKET_LOCK_INC);
+		BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
 
-		/* add_smp() is a full mb() */
+		head = xadd(&lock->tickets.head, TICKET_LOCK_INC);
 
-		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
-			__ticket_unlock_slowpath(lock, prev);
+		if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
+			head &= ~TICKET_SLOWPATH_FLAG;
+			__ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
+		}
 	} else
 		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 }
@@ -164,14 +161,15 @@
 {
 	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
-	return tmp.tail != tmp.head;
+	return !__tickets_equal(tmp.tail, tmp.head);
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
-	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
+	tmp.head &= ~TICKET_SLOWPATH_FLAG;
+	return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
 #define arch_spin_is_contended	arch_spin_is_contended
 
@@ -191,8 +189,8 @@
 		 * We need to check "unlocked" in a loop, tmp.head == head
 		 * can be false positive because of overflow.
 		 */
-		if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) ||
-		    tmp.head != head)
+		if (__tickets_equal(tmp.head, tmp.tail) ||
+				!__tickets_equal(tmp.head, head))
 			break;
 
 		cpu_relax();
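
The recurring change in this file is that ticket comparisons now go through __tickets_equal(), which ignores TICKET_SLOWPATH_FLAG so a head value tagged for the slowpath still matches its tail. A stand-alone illustration of that comparison follows; the flag and ticket values are made up for the example and are not the real arch definitions:

#include <stdint.h>
#include <stdio.h>

#define TICKET_SLOWPATH_FLAG	0x1u	/* illustrative; the real value is arch-specific */

typedef uint16_t ticket_t;

/* Equal if the two tickets differ at most in the slowpath flag bit. */
static int tickets_equal(ticket_t one, ticket_t two)
{
	return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}

int main(void)
{
	ticket_t tail = 0x10;				/* ticket we are waiting for */
	ticket_t head = 0x10 | TICKET_SLOWPATH_FLAG;	/* same ticket, tagged for slowpath */

	printf("plain ==      : %d\n", head == tail);			/* 0: naive compare misses it */
	printf("tickets_equal : %d\n", tickets_equal(head, tail));	/* 1 */
	return 0;
}
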
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 5fa9770..c9a6d68 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -82,18 +82,15 @@
 	if (boot_cpu_has(X86_FEATURE_XSAVES))
 		asm volatile("1:"XSAVES"\n\t"
 			"2:\n\t"
-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			     xstate_fault
+			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 			:   "memory");
 	else
 		asm volatile("1:"XSAVE"\n\t"
 			"2:\n\t"
-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			     xstate_fault
+			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 			:   "memory");
-
-	asm volatile(xstate_fault
-		     : "0" (0)
-		     : "memory");
-
 	return err;
 }
 
@@ -112,18 +109,15 @@
 	if (boot_cpu_has(X86_FEATURE_XSAVES))
 		asm volatile("1:"XRSTORS"\n\t"
 			"2:\n\t"
-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			     xstate_fault
+			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 			:   "memory");
 	else
 		asm volatile("1:"XRSTOR"\n\t"
 			"2:\n\t"
-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			     xstate_fault
+			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 			:   "memory");
-
-	asm volatile(xstate_fault
-		     : "0" (0)
-		     : "memory");
-
 	return err;
 }
 
@@ -149,9 +143,9 @@
 	 */
 	alternative_input_2(
 		"1:"XSAVE,
-		"1:"XSAVEOPT,
+		XSAVEOPT,
 		X86_FEATURE_XSAVEOPT,
-		"1:"XSAVES,
+		XSAVES,
 		X86_FEATURE_XSAVES,
 		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
 		"memory");
@@ -178,7 +172,7 @@
 	 */
 	alternative_input(
 		"1: " XRSTOR,
-		"1: " XRSTORS,
+		XRSTORS,
 		X86_FEATURE_XSAVES,
 		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 		: "memory");
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index ae97ed0..803b684 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -613,6 +613,11 @@
 {
 	int rc, irq, trigger, polarity;
 
+	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+		*irqp = gsi;
+		return 0;
+	}
+
 	rc = acpi_get_override_irq(gsi, &trigger, &polarity);
 	if (rc == 0) {
 		trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
@@ -1333,6 +1338,26 @@
 }
 
 /*
+ * ACPI offers an alternative platform interface model that removes
+ * ACPI hardware requirements for platforms that do not implement
+ * the PC Architecture.
+ *
+ * We initialize the Hardware-reduced ACPI model here:
+ */
+static void __init acpi_reduced_hw_init(void)
+{
+	if (acpi_gbl_reduced_hardware) {
+		/*
+		 * Override x86_init functions and bypass legacy pic
+		 * in Hardware-reduced ACPI mode
+		 */
+		x86_init.timers.timer_init	= x86_init_noop;
+		x86_init.irqs.pre_vector_init	= x86_init_noop;
+		legacy_pic			= &null_legacy_pic;
+	}
+}
+
+/*
  * If your system is blacklisted here, but you find that acpi=force
  * works for you, please contact linux-acpi@vger.kernel.org
  */
@@ -1531,6 +1556,11 @@
 	 */
 	early_acpi_process_madt();
 
+	/*
+	 * Hardware-reduced ACPI mode initialization:
+	 */
+	acpi_reduced_hw_init();
+
 	return 0;
 }
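
The hardware-reduced path above works by swapping no-op callbacks into the x86_init ops table before any legacy timer or PIC setup can run. A minimal stand-alone sketch of that override pattern; the struct, field names and reduced_hw flag below are invented for the illustration:

#include <stdbool.h>
#include <stdio.h>

struct platform_ops {
	void (*timer_init)(void);
	void (*pre_vector_init)(void);
};

static void legacy_timer_init(void)	{ puts("programming legacy timer"); }
static void legacy_pic_init(void)	{ puts("setting up legacy PIC"); }
static void init_noop(void)		{ /* intentionally empty */ }

static struct platform_ops ops = {
	.timer_init	 = legacy_timer_init,
	.pre_vector_init = legacy_pic_init,
};

static void reduced_hw_init(bool reduced_hw)
{
	if (reduced_hw) {			/* bypass legacy hardware entirely */
		ops.timer_init	    = init_noop;
		ops.pre_vector_init = init_noop;
	}
}

int main(void)
{
	reduced_hw_init(true);
	ops.timer_init();		/* no output: the legacy paths were replaced */
	ops.pre_vector_init();
	return 0;
}
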
 
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index c2fd21f..017149c 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -37,10 +37,12 @@
 static unsigned int get_apic_id(unsigned long x)
 {
 	unsigned long value;
-	unsigned int id;
+	unsigned int id = (x >> 24) & 0xff;
 
-	rdmsrl(MSR_FAM10H_NODE_ID, value);
-	id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U);
+	if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+		rdmsrl(MSR_FAM10H_NODE_ID, value);
+		id |= (value << 2) & 0xff00;
+	}
 
 	return id;
 }
@@ -155,10 +157,18 @@
 
 static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 {
-	if (c->phys_proc_id != node) {
-		c->phys_proc_id = node;
-		per_cpu(cpu_llc_id, smp_processor_id()) = node;
+	u64 val;
+	u32 nodes = 1;
+
+	this_cpu_write(cpu_llc_id, node);
+
+	/* Account for nodes per socket in multi-core-module processors */
+	if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+		rdmsrl(MSR_FAM10H_NODE_ID, val);
+		nodes = ((val >> 3) & 7) + 1;
 	}
+
+	c->phys_proc_id = node / nodes;
 }
 
 static int __init numachip_system_init(void)
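
The fixup above reads the nodes-per-socket count from bits 5:3 of the NODE_ID MSR and divides the node number down to a socket id. A quick stand-alone check of that bit arithmetic; the MSR value and node number are made-up inputs for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t node_id_msr = 0x19;				/* made-up MSR contents */
	unsigned int nodes = ((node_id_msr >> 3) & 7) + 1;	/* bits 5:3, plus one */
	unsigned int node = 5;					/* made-up NUMA node number */

	printf("nodes per socket: %u\n", nodes);	/* (0x19 >> 3) & 7 = 3, +1 = 4 */
	printf("phys_proc_id:     %u\n", node / nodes);	/* 5 / 4 = 1 */
	return 0;
}
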
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b5c8ff5..2346c95 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1396,6 +1396,12 @@
 
 	wait_for_master_cpu(cpu);
 
+	/*
+	 * Initialize the CR4 shadow before doing anything that could
+	 * try to read it.
+	 */
+	cr4_init_shadow();
+
 	show_ucode_info_early();
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 94d7dcb..50163fa 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -565,8 +565,8 @@
 	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
 	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
 	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
-	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set ssociative" },
-	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set ssociative" },
+	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
+	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
 	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
 	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
 	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index c6826d1..746e7fd 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -196,6 +196,11 @@
 		struct microcode_header_intel mc_header;
 		unsigned int mc_size;
 
+		if (leftover < sizeof(mc_header)) {
+			pr_err("error! Truncated header in microcode data file\n");
+			break;
+		}
+
 		if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
 			break;
 
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
index ec9df6f..420eb93 100644
--- a/arch/x86/kernel/cpu/microcode/intel_early.c
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
@@ -321,7 +321,11 @@
 	unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
 	int i;
 
-	while (leftover) {
+	while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
+
+		if (leftover < sizeof(mc_header))
+			break;
+
 		mc_header = (struct microcode_header_intel *)ucode_ptr;
 
 		mc_size = get_totalsize(mc_header);
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 000d419..31e2d5b 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -982,6 +982,9 @@
 ENTRY(xen_do_upcall)
 1:	mov %esp, %eax
 	call xen_evtchn_do_upcall
+#ifndef CONFIG_PREEMPT
+	call xen_maybe_preempt_hcall
+#endif
 	jmp  ret_from_intr
 	CFI_ENDPROC
 ENDPROC(xen_hypervisor_callback)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index db13655..1d74d16 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -269,11 +269,14 @@
 	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
 	jz   1f
 
-	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
-	jnz  int_ret_from_sys_call
-
-	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
-	jmp ret_from_sys_call			# go to the SYSRET fastpath
+	/*
+	 * By the time we get here, we have no idea whether our pt_regs,
+	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
+	 * the slow path, or one of the ia32entry paths.
+	 * Use int_ret_from_sys_call to return, since it can safely handle
+	 * all of the above.
+	 */
+	jmp  int_ret_from_sys_call
 
 1:
 	subq $REST_SKIP, %rsp	# leave space for volatiles
@@ -1208,6 +1211,9 @@
 	popq %rsp
 	CFI_DEF_CFA_REGISTER rsp
 	decl PER_CPU_VAR(irq_count)
+#ifndef CONFIG_PREEMPT
+	call xen_maybe_preempt_hcall
+#endif
 	jmp  error_exit
 	CFI_ENDPROC
 END(xen_do_hypervisor_callback)
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 705ef8d..67b1cbe 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -302,6 +302,9 @@
 		irq = __this_cpu_read(vector_irq[vector]);
 		if (irq >= 0) {
 			desc = irq_to_desc(irq);
+			if (!desc)
+				continue;
+
 			data = irq_desc_get_irq_data(desc);
 			cpumask_copy(&affinity_new, data->affinity);
 			cpu_clear(this_cpu, affinity_new);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 98f654d..4e3d5a96 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -84,7 +84,7 @@
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
 	/*      ----------------------------------------------          */
 	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
-	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
+	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
 	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
 	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
 	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
@@ -223,27 +223,48 @@
 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
 	struct kprobe *kp;
+	unsigned long faddr;
 
 	kp = get_kprobe((void *)addr);
-	/* There is no probe, return original address */
-	if (!kp)
+	faddr = ftrace_location(addr);
+	/*
+	 * Addresses inside the ftrace location are refused by
+	 * arch_check_ftrace_location(). Something went terribly wrong
+	 * if such an address is checked here.
+	 */
+	if (WARN_ON(faddr && faddr != addr))
+		return 0UL;
+	/*
+	 * Use the current code if it is not modified by Kprobe
+	 * and it cannot be modified by ftrace.
+	 */
+	if (!kp && !faddr)
 		return addr;
 
 	/*
-	 *  Basically, kp->ainsn.insn has an original instruction.
-	 *  However, RIP-relative instruction can not do single-stepping
-	 *  at different place, __copy_instruction() tweaks the displacement of
-	 *  that instruction. In that case, we can't recover the instruction
-	 *  from the kp->ainsn.insn.
+	 * Basically, kp->ainsn.insn has an original instruction.
+	 * However, RIP-relative instruction can not do single-stepping
+	 * at different place, __copy_instruction() tweaks the displacement of
+	 * that instruction. In that case, we can't recover the instruction
+	 * from the kp->ainsn.insn.
 	 *
-	 *  On the other hand, kp->opcode has a copy of the first byte of
-	 *  the probed instruction, which is overwritten by int3. And
-	 *  the instruction at kp->addr is not modified by kprobes except
-	 *  for the first byte, we can recover the original instruction
-	 *  from it and kp->opcode.
+	 * On the other hand, in the case of a normal Kprobe, kp->opcode has a copy
+	 * of the first byte of the probed instruction, which is overwritten
+	 * by int3. And the instruction at kp->addr is not modified by kprobes
+	 * except for the first byte, we can recover the original instruction
+	 * from it and kp->opcode.
+	 *
+	 * In case of Kprobes using ftrace, we do not have a copy of
+	 * the original instruction. In fact, the ftrace location might
+	 * be modified at any time and could even be in an inconsistent state.
+	 * Fortunately, we know that the original code is the ideal 5-byte
+	 * long NOP.
 	 */
-	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-	buf[0] = kp->opcode;
+	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	if (faddr)
+		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+	else
+		buf[0] = kp->opcode;
 	return (unsigned long)buf;
 }
 
@@ -251,6 +272,7 @@
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
+ * Returns zero if the instruction cannot be recovered.
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
@@ -285,6 +307,8 @@
 		 * normally used, we just go through if there is no kprobe.
 		 */
 		__addr = recover_probed_instruction(buf, addr);
+		if (!__addr)
+			return 0;
 		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
 		insn_get_length(&insn);
 
@@ -333,6 +357,8 @@
 	unsigned long recovered_insn =
 		recover_probed_instruction(buf, (unsigned long)src);
 
+	if (!recovered_insn)
+		return 0;
 	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 	insn_get_length(&insn);
 	/* Another subsystem puts a breakpoint, failed to recover */
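
The recovery logic above reduces to three cases: untouched text is used directly, an ftrace site is known to have originally been the ideal 5-byte NOP, and a plain kprobe restores its saved first byte over the int3. A simplified stand-alone restatement, with the kernel types and helpers replaced by plain C stand-ins; the NOP encoding shown is one common choice and is an assumption here:

#include <stdint.h>
#include <string.h>

#define MAX_INSN_SIZE	16

/* One common 5-byte NOP encoding: nopl 0x0(%rax,%rax,1). Assumed for the sketch. */
static const uint8_t ideal_nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

/*
 * Rebuild the original instruction bytes at 'addr' into 'buf'.
 * 'has_kprobe'/'kp_opcode' model a software-breakpoint kprobe (int3 over the
 * first byte); 'is_ftrace_site' models an ftrace call site whose original
 * bytes are known to have been the 5-byte NOP.
 */
static const uint8_t *recover_insn(uint8_t *buf, const uint8_t *addr,
				   int has_kprobe, uint8_t kp_opcode,
				   int is_ftrace_site)
{
	if (!has_kprobe && !is_ftrace_site)
		return addr;			/* live text is unmodified, use it */

	memcpy(buf, addr, MAX_INSN_SIZE);
	if (is_ftrace_site)
		memcpy(buf, ideal_nop5, 5);	/* original code was the ideal NOP */
	else
		buf[0] = kp_opcode;		/* undo the int3 byte */
	return buf;
}

int main(void)
{
	uint8_t text[MAX_INSN_SIZE] = { 0xcc };	/* int3 where a kprobe was planted */
	uint8_t buf[MAX_INSN_SIZE];

	return recover_insn(buf, text, 1, 0x55, 0) == buf ? 0 : 1;
}
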
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 0dd8d08..7b3b9d1 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -259,6 +259,8 @@
 			 */
 			return 0;
 		recovered_insn = recover_probed_instruction(buf, addr);
+		if (!recovered_insn)
+			return 0;
 		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 		insn_get_length(&insn);
 		/* Another subsystem puts a breakpoint */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 94f6434..e354cc6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -609,7 +609,7 @@
 	u8 ret;
 	u8 old;
 
-	old = ACCESS_ONCE(zero_stats);
+	old = READ_ONCE(zero_stats);
 	if (unlikely(old)) {
 		ret = cmpxchg(&zero_stats, old, 0);
 		/* This ensures only one fellow resets the stat */
@@ -727,6 +727,7 @@
 	int cpu;
 	u64 start;
 	unsigned long flags;
+	__ticket_t head;
 
 	if (in_nmi())
 		return;
@@ -768,11 +769,15 @@
 	 */
 	__ticket_enter_slowpath(lock);
 
+	/* make sure enter_slowpath, which is atomic, does not cross the read */
+	smp_mb__after_atomic();
+
 	/*
 	 * check again make sure it didn't become free while
 	 * we weren't looking.
 	 */
-	if (ACCESS_ONCE(lock->tickets.head) == want) {
+	head = READ_ONCE(lock->tickets.head);
+	if (__tickets_equal(head, want)) {
 		add_stats(TAKEN_SLOW_PICKUP, 1);
 		goto out;
 	}
@@ -803,8 +808,8 @@
 	add_stats(RELEASED_SLOW, 1);
 	for_each_cpu(cpu, &waiting_cpus) {
 		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
-		if (ACCESS_ONCE(w->lock) == lock &&
-		    ACCESS_ONCE(w->want) == ticket) {
+		if (READ_ONCE(w->lock) == lock &&
+		    READ_ONCE(w->want) == ticket) {
 			add_stats(RELEASED_SLOW_KICKED, 1);
 			kvm_kick_cpu(cpu);
 			break;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9d2073e..4ff5d16 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -384,7 +384,7 @@
 		goto exit;
 	conditional_sti(regs);
 
-	if (!user_mode(regs))
+	if (!user_mode_vm(regs))
 		die("bounds", regs, error_code);
 
 	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
@@ -637,7 +637,7 @@
 	 * then it's very likely the result of an icebp/int01 trap.
 	 * User wants a sigtrap for that.
 	 */
-	if (!dr6 && user_mode(regs))
+	if (!dr6 && user_mode_vm(regs))
 		user_icebp = 1;
 
 	/* Catch kmemcheck conditions first of all! */
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 8b96a94..81f8adb0 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -66,27 +66,54 @@
  * Good-instruction tables for 32-bit apps.  This is non-const and volatile
  * to keep gcc from statically optimizing it out, as variable_test_bit makes
  * some versions of gcc to think only *(unsigned long*) is used.
+ *
+ * Opcodes we'll probably never support:
+ * 6c-6f - ins,outs. SEGVs if used in userspace
+ * e4-e7 - in,out imm. SEGVs if used in userspace
+ * ec-ef - in,out acc. SEGVs if used in userspace
+ * cc - int3. SIGTRAP if used in userspace
+ * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
+ *	(why we support bound (62) then? it's similar, and similarly unused...)
+ * f1 - int1. SIGTRAP if used in userspace
+ * f4 - hlt. SEGVs if used in userspace
+ * fa - cli. SEGVs if used in userspace
+ * fb - sti. SEGVs if used in userspace
+ *
+ * Opcodes which need some work to be supported:
+ * 07,17,1f - pop es/ss/ds
+ *	Normally not used in userspace, but would execute if used.
+ *	Can cause GP or stack exception if tries to load wrong segment descriptor.
+ *	We hesitate to run them under single step since kernel's handling
+ *	of userspace single-stepping (TF flag) is fragile.
+ *	We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
+ *	on the same grounds that they are never used.
+ * cd - int N.
+ *	Used by userspace for "int 80" syscall entry. (Other "int N"
+ *	cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
+ *	Not supported since kernel's handling of userspace single-stepping
+ *	(TF flag) is fragile.
+ * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
  */
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 static volatile u32 good_insns_32[256 / 32] = {
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 	/*      ----------------------------------------------         */
-	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
+	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
 	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
-	W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
-	W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
+	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
+	W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
 	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
 	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
-	W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
+	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
 	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
 	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
 	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
 	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
 	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
 	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
-	W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
 	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
-	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
+	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
 	/*      ----------------------------------------------         */
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
@@ -94,27 +121,61 @@
 #define good_insns_32	NULL
 #endif
 
-/* Good-instruction tables for 64-bit apps */
+/* Good-instruction tables for 64-bit apps.
+ *
+ * Genuinely invalid opcodes:
+ * 06,07 - formerly push/pop es
+ * 0e - formerly push cs
+ * 16,17 - formerly push/pop ss
+ * 1e,1f - formerly push/pop ds
+ * 27,2f,37,3f - formerly daa/das/aaa/aas
+ * 60,61 - formerly pusha/popa
+ * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
+ * 82 - formerly redundant encoding of Group1
+ * 9a - formerly call seg:ofs
+ * ce - formerly into
+ * d4,d5 - formerly aam/aad
+ * d6 - formerly undocumented salc
+ * ea - formerly jmp seg:ofs
+ *
+ * Opcodes we'll probably never support:
+ * 6c-6f - ins,outs. SEGVs if used in userspace
+ * e4-e7 - in,out imm. SEGVs if used in userspace
+ * ec-ef - in,out acc. SEGVs if used in userspace
+ * cc - int3. SIGTRAP if used in userspace
+ * f1 - int1. SIGTRAP if used in userspace
+ * f4 - hlt. SEGVs if used in userspace
+ * fa - cli. SEGVs if used in userspace
+ * fb - sti. SEGVs if used in userspace
+ *
+ * Opcodes which need some work to be supported:
+ * cd - int N.
+ *	Used by userspace for "int 80" syscall entry. (Other "int N"
+ *	cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
+ *	Not supported since kernel's handling of userspace single-stepping
+ *	(TF flag) is fragile.
+ * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
+ */
 #if defined(CONFIG_X86_64)
 static volatile u32 good_insns_64[256 / 32] = {
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 	/*      ----------------------------------------------         */
-	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
+	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
 	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
-	W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
-	W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
-	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
+	W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
+	W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
+	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
 	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
-	W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
+	W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
 	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
 	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
-	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
+	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
 	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
 	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
-	W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
+	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
 	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
-	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
-	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
+	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
+	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
 	/*      ----------------------------------------------         */
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
@@ -122,49 +183,55 @@
 #define good_insns_64	NULL
 #endif
 
-/* Using this for both 64-bit and 32-bit apps */
+/* Using this for both 64-bit and 32-bit apps.
+ * Opcodes we don't support:
+ * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
+ * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
+ *	Also encodes tons of other system insns if mod=11.
+ *	Some are in fact non-system: xend, xtest, rdtscp, maybe more
+ * 0f 05 - syscall
+ * 0f 06 - clts (CPL0 insn)
+ * 0f 07 - sysret
+ * 0f 08 - invd (CPL0 insn)
+ * 0f 09 - wbinvd (CPL0 insn)
+ * 0f 0b - ud2
+ * 0f 30 - wrmsr (CPL0 insn) (then why rdmsr is allowed, it's also CPL0 insn?)
+ * 0f 34 - sysenter
+ * 0f 35 - sysexit
+ * 0f 37 - getsec
+ * 0f 78 - vmread (Intel VMX. CPL0 insn)
+ * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
+ *	Note: with prefixes, these two opcodes are
+ *	extrq/insertq/AVX512 convert vector ops.
+ * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
+ *	{rd,wr}{fs,gs}base,{s,l,m}fence.
+ *	Why? They are all user-executable.
+ */
 static volatile u32 good_2byte_insns[256 / 32] = {
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 	/*      ----------------------------------------------         */
-	W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
-	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
-	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
-	W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
+	W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
+	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
+	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
+	W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
 	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
 	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
 	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
-	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
+	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
 	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
 	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
-	W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
-	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
+	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
+	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
 	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
-	W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
 	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
-	W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
+	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
 	/*      ----------------------------------------------         */
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
 #undef W
 
 /*
- * opcodes we'll probably never support:
- *
- *  6c-6d, e4-e5, ec-ed - in
- *  6e-6f, e6-e7, ee-ef - out
- *  cc, cd - int3, int
- *  cf - iret
- *  d6 - illegal instruction
- *  f1 - int1/icebp
- *  f4 - hlt
- *  fa, fb - cli, sti
- *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
- *
- * invalid opcodes in 64-bit mode:
- *
- *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
- *  63 - we support this opcode in x86_64 but not in i386.
- *
  * opcodes we may need to refine support for:
  *
  *  0f - 2-byte instructions: For many of these instructions, the validity
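
The W()-built tables above pack one probe-ability bit per opcode into eight 32-bit words, which uprobes then queries with test_bit(). A tiny stand-alone version of the same packing and lookup; the opcodes marked below are arbitrary examples, not the real table contents:

#include <stdint.h>
#include <stdio.h>

/* One validity bit per opcode: 256 opcodes -> 8 x 32-bit words. */
static uint32_t good_insns[256 / 32];

static void mark_good(uint8_t opcode)
{
	good_insns[opcode / 32] |= 1u << (opcode % 32);
}

static int is_good(uint8_t opcode)
{
	return (good_insns[opcode / 32] >> (opcode % 32)) & 1u;
}

int main(void)
{
	mark_good(0x90);			/* e.g. NOP */
	printf("0x90: %d\n", is_good(0x90));	/* 1: may be probed */
	printf("0xcc: %d\n", is_good(0xcc));	/* 0: int3 stays rejected */
	return 0;
}
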
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 34f66e5..cdc6cf9 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -379,7 +379,7 @@
 		 * thread's fpu state, reconstruct fxstate from the fsave
 		 * header. Sanitize the copied state etc.
 		 */
-		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+		struct fpu *fpu = &tsk->thread.fpu;
 		struct user_i387_ia32_struct env;
 		int err = 0;
 
@@ -393,14 +393,15 @@
 		 */
 		drop_fpu(tsk);
 
-		if (__copy_from_user(xsave, buf_fx, state_size) ||
+		if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
 		    __copy_from_user(&env, buf, sizeof(env))) {
+			fpu_finit(fpu);
 			err = -1;
 		} else {
 			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
-			set_used_math();
 		}
 
+		set_used_math();
 		if (use_eager_fpu()) {
 			preempt_disable();
 			math_state_restore();
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e0b794a..106c015 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4950,7 +4950,8 @@
 			goto done;
 		}
 	}
-	ctxt->dst.orig_val = ctxt->dst.val;
+	/* Copy full 64-bit value for CMPXCHG8B.  */
+	ctxt->dst.orig_val64 = ctxt->dst.val64;
 
 special_insn:
 
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index cc31f7c..9541ba3 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -507,6 +507,7 @@
 		return -EOPNOTSUPP;
 
 	if (len != 1) {
+		memset(val, 0, len);
 		pr_pic_unimpl("non byte read\n");
 		return 0;
 	}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e55b5fc..bd4e34d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1572,7 +1572,7 @@
 		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
 	}
 	apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
-	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
+	apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
 	apic->highest_isr_cache = -1;
 	update_divide_count(apic);
 	atomic_set(&apic->lapic_timer.pending, 0);
@@ -1782,7 +1782,7 @@
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;
-	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
+	apic->isr_count = kvm_x86_ops->hwapic_isr_update ?
 				1 : count_vectors(apic->regs + APIC_ISR);
 	apic->highest_isr_cache = -1;
 	if (kvm_x86_ops->hwapic_irr_update)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d319e0c..cc618c8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3649,11 +3649,6 @@
 	return;
 }
 
-static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
-{
-	return;
-}
-
 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
 	return;
@@ -4403,7 +4398,6 @@
 	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
 	.vm_has_apicv = svm_vm_has_apicv,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
-	.hwapic_isr_update = svm_hwapic_isr_update,
 	.sync_pir_to_irr = svm_sync_pir_to_irr,
 
 	.set_tss_addr = svm_set_tss_addr,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 14c1a18..10a481b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2168,7 +2168,10 @@
 {
 	unsigned long *msr_bitmap;
 
-	if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
+	if (is_guest_mode(vcpu))
+		msr_bitmap = vmx_msr_bitmap_nested;
+	else if (irqchip_in_kernel(vcpu->kvm) &&
+		apic_x2apic_mode(vcpu->arch.apic)) {
 		if (is_long_mode(vcpu))
 			msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
 		else
@@ -4367,6 +4370,18 @@
 	return 0;
 }
 
+static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_SMP
+	if (vcpu->mode == IN_GUEST_MODE) {
+		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+				POSTED_INTR_VECTOR);
+		return true;
+	}
+#endif
+	return false;
+}
+
 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 						int vector)
 {
@@ -4375,9 +4390,7 @@
 	if (is_guest_mode(vcpu) &&
 	    vector == vmx->nested.posted_intr_nv) {
 		/* the PIR and ON have been set by L1. */
-		if (vcpu->mode == IN_GUEST_MODE)
-			apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
-				POSTED_INTR_VECTOR);
+		kvm_vcpu_trigger_posted_interrupt(vcpu);
 		/*
 		 * If a posted intr is not recognized by hardware,
 		 * we will accomplish it in the next vmentry.
@@ -4409,12 +4422,7 @@
 
 	r = pi_test_and_set_on(&vmx->pi_desc);
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
-#ifdef CONFIG_SMP
-	if (!r && (vcpu->mode == IN_GUEST_MODE))
-		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
-				POSTED_INTR_VECTOR);
-	else
-#endif
+	if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
 		kvm_vcpu_kick(vcpu);
 }
 
@@ -9213,9 +9221,9 @@
 	}
 
 	if (cpu_has_vmx_msr_bitmap() &&
-	    exec_control & CPU_BASED_USE_MSR_BITMAPS &&
-	    nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) {
-		vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested));
+	    exec_control & CPU_BASED_USE_MSR_BITMAPS) {
+		nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
+		/* MSR_BITMAP will be set by following vmx_set_efer. */
 	} else
 		exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd7a70b..32bf19e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2744,7 +2744,6 @@
 	case KVM_CAP_USER_NMI:
 	case KVM_CAP_REINJECT_CONTROL:
 	case KVM_CAP_IRQ_INJECT_STATUS:
-	case KVM_CAP_IRQFD:
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_IOEVENTFD_NO_LENGTH:
 	case KVM_CAP_PIT2:
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 4a0890f..08f41ca 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -1,6 +1,6 @@
 config LGUEST_GUEST
 	bool "Lguest guest support"
-	depends on X86_32 && PARAVIRT
+	depends on X86_32 && PARAVIRT && PCI
 	select TTY
 	select VIRTUALIZATION
 	select VIRTIO
@@ -8,7 +8,7 @@
 	help
 	  Lguest is a tiny in-kernel hypervisor.  Selecting this will
 	  allow your kernel to boot under lguest.  This option will increase
-	  your kernel size by about 6k.  If in doubt, say N.
+	  your kernel size by about 10k.  If in doubt, say N.
 
 	  If you say Y here, make sure you say Y (or M) to the virtio block
 	  and net drivers which lguest needs.
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 553c094..a110efc 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -238,6 +238,31 @@
 	}
 }
 
+static const char *page_size_string(struct map_range *mr)
+{
+	static const char str_1g[] = "1G";
+	static const char str_2m[] = "2M";
+	static const char str_4m[] = "4M";
+	static const char str_4k[] = "4k";
+
+	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
+		return str_1g;
+	/*
+	 * 32-bit without PAE has a 4M large page size.
+	 * PG_LEVEL_2M is misnamed, but we can at least
+	 * print out the right size in the string.
+	 */
+	if (IS_ENABLED(CONFIG_X86_32) &&
+	    !IS_ENABLED(CONFIG_X86_PAE) &&
+	    mr->page_size_mask & (1<<PG_LEVEL_2M))
+		return str_4m;
+
+	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
+		return str_2m;
+
+	return str_4k;
+}
+
 static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 				     unsigned long start,
 				     unsigned long end)
@@ -333,8 +358,7 @@
 	for (i = 0; i < nr_range; i++)
 		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
 				mr[i].start, mr[i].end - 1,
-			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
+				page_size_string(&mr[i]));
 
 	return nr_range;
 }
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 919b912..df4552b 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -35,12 +35,12 @@
 	.flags = -1,
 };
 
-static unsigned int stack_maxrandom_size(void)
+static unsigned long stack_maxrandom_size(void)
 {
-	unsigned int max = 0;
+	unsigned long max = 0;
 	if ((current->flags & PF_RANDOMIZE) &&
 		!(current->personality & ADDR_NO_RANDOMIZE)) {
-		max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
+		max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
 	}
 
 	return max;
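
The widening to unsigned long matters because, with the usual 64-bit values, the masked-and-shifted result no longer fits in 32 bits. A quick arithmetic check; STACK_RND_MASK = 0x3fffff and PAGE_SHIFT = 12 are assumed here as the typical x86_64 values, not taken from the patch:

#include <stdio.h>

#define STACK_RND_MASK	0x3fffff	/* assumed typical x86_64 value */
#define PAGE_SHIFT	12

int main(void)
{
	unsigned int  max32 = ((-1U)  & STACK_RND_MASK) << PAGE_SHIFT;	/* wraps in 32-bit math */
	unsigned long max64 = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;

	printf("32-bit result: %#x\n",  max32);		/* 0xfffff000: high bits lost */
	printf("64-bit result: %#lx\n", max64);		/* 0x3fffff000: full ~16 GiB range */
	return 0;
}
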
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 6ac2738..e469598 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -331,7 +331,7 @@
 				struct list_head *list)
 {
 	int ret;
-	struct resource_entry *entry;
+	struct resource_entry *entry, *tmp;
 
 	sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
 	info->bridge = device;
@@ -345,8 +345,13 @@
 		dev_dbg(&device->dev,
 			"no IO and memory resources present in _CRS\n");
 	else
-		resource_list_for_each_entry(entry, list)
-			entry->res->name = info->name;
+		resource_list_for_each_entry_safe(entry, tmp, list) {
+			if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
+			    (entry->res->flags & IORESOURCE_DISABLED))
+				resource_list_destroy_entry(entry);
+			else
+				entry->res->name = info->name;
+		}
 }
 
 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 85afde1..a62e0be 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -5,6 +5,7 @@
 obj-y	+= goldfish/
 obj-y	+= iris/
 obj-y	+= intel-mid/
+obj-y	+= intel-quark/
 obj-y	+= olpc/
 obj-y	+= scx200/
 obj-y	+= sfi/
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 5fcda72..86d0f9e 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -91,167 +91,6 @@
 	ret
 ENDPROC(efi_call)
 
-#ifdef CONFIG_EFI_MIXED
-
-/*
- * We run this function from the 1:1 mapping.
- *
- * This function must be invoked with a 1:1 mapped stack.
- */
-ENTRY(__efi64_thunk)
-	movl	%ds, %eax
-	push	%rax
-	movl	%es, %eax
-	push	%rax
-	movl	%ss, %eax
-	push	%rax
-
-	subq	$32, %rsp
-	movl	%esi, 0x0(%rsp)
-	movl	%edx, 0x4(%rsp)
-	movl	%ecx, 0x8(%rsp)
-	movq	%r8, %rsi
-	movl	%esi, 0xc(%rsp)
-	movq	%r9, %rsi
-	movl	%esi,  0x10(%rsp)
-
-	sgdt	save_gdt(%rip)
-
-	leaq	1f(%rip), %rbx
-	movq	%rbx, func_rt_ptr(%rip)
-
-	/* Switch to gdt with 32-bit segments */
-	movl	64(%rsp), %eax
-	lgdt	(%rax)
-
-	leaq	efi_enter32(%rip), %rax
-	pushq	$__KERNEL_CS
-	pushq	%rax
-	lretq
-
-1:	addq	$32, %rsp
-
-	lgdt	save_gdt(%rip)
-
-	pop	%rbx
-	movl	%ebx, %ss
-	pop	%rbx
-	movl	%ebx, %es
-	pop	%rbx
-	movl	%ebx, %ds
-
-	/*
-	 * Convert 32-bit status code into 64-bit.
-	 */
-	test	%rax, %rax
-	jz	1f
-	movl	%eax, %ecx
-	andl	$0x0fffffff, %ecx
-	andl	$0xf0000000, %eax
-	shl	$32, %rax
-	or	%rcx, %rax
-1:
-	ret
-ENDPROC(__efi64_thunk)
-
-ENTRY(efi_exit32)
-	movq	func_rt_ptr(%rip), %rax
-	push	%rax
-	mov	%rdi, %rax
-	ret
-ENDPROC(efi_exit32)
-
-	.code32
-/*
- * EFI service pointer must be in %edi.
- *
- * The stack should represent the 32-bit calling convention.
- */
-ENTRY(efi_enter32)
-	movl	$__KERNEL_DS, %eax
-	movl	%eax, %ds
-	movl	%eax, %es
-	movl	%eax, %ss
-
-	/* Reload pgtables */
-	movl	%cr3, %eax
-	movl	%eax, %cr3
-
-	/* Disable paging */
-	movl	%cr0, %eax
-	btrl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-
-	/* Disable long mode via EFER */
-	movl	$MSR_EFER, %ecx
-	rdmsr
-	btrl	$_EFER_LME, %eax
-	wrmsr
-
-	call	*%edi
-
-	/* We must preserve return value */
-	movl	%eax, %edi
-
-	/*
-	 * Some firmware will return with interrupts enabled. Be sure to
-	 * disable them before we switch GDTs.
-	 */
-	cli
-
-	movl	68(%esp), %eax
-	movl	%eax, 2(%eax)
-	lgdtl	(%eax)
-
-	movl	%cr4, %eax
-	btsl	$(X86_CR4_PAE_BIT), %eax
-	movl	%eax, %cr4
-
-	movl	%cr3, %eax
-	movl	%eax, %cr3
-
-	movl	$MSR_EFER, %ecx
-	rdmsr
-	btsl	$_EFER_LME, %eax
-	wrmsr
-
-	xorl	%eax, %eax
-	lldt	%ax
-
-	movl	72(%esp), %eax
-	pushl	$__KERNEL_CS
-	pushl	%eax
-
-	/* Enable paging */
-	movl	%cr0, %eax
-	btsl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-	lret
-ENDPROC(efi_enter32)
-
-	.data
-	.balign	8
-	.global	efi32_boot_gdt
-efi32_boot_gdt:	.word	0
-		.quad	0
-
-save_gdt:	.word	0
-		.quad	0
-func_rt_ptr:	.quad	0
-
-	.global efi_gdt64
-efi_gdt64:
-	.word	efi_gdt64_end - efi_gdt64
-	.long	0			/* Filled out by user */
-	.word	0
-	.quad	0x0000000000000000	/* NULL descriptor */
-	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
-	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
-	.quad	0x0080890000000000	/* TS descriptor */
-	.quad   0x0000000000000000	/* TS continued */
-efi_gdt64_end:
-#endif /* CONFIG_EFI_MIXED */
-
 	.data
 ENTRY(efi_scratch)
 	.fill 3,8,0
diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
index 8806fa7..ff85d28 100644
--- a/arch/x86/platform/efi/efi_thunk_64.S
+++ b/arch/x86/platform/efi/efi_thunk_64.S
@@ -1,9 +1,26 @@
 /*
  * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ *
+ * Support for invoking 32-bit EFI runtime services from a 64-bit
+ * kernel.
+ *
+ * The below thunking functions are only used after ExitBootServices()
+ * has been called. This simplifies things considerably as compared with
+ * the early EFI thunking because we can leave all the kernel state
+ * intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime
+ * services from __KERNEL32_CS. This means we can continue to service
+ * interrupts across an EFI mixed mode call.
+ *
+ * We do, however, need to handle the fact that we're running in a full
+ * 64-bit virtual address space. Things like the stack and instruction
+ * addresses need to be accessible by the 32-bit firmware, so we rely on
+ * using the identity mappings in the EFI page table to access the stack
+ * and kernel text (see efi_setup_page_tables()).
  */
 
 #include <linux/linkage.h>
 #include <asm/page_types.h>
+#include <asm/segment.h>
 
 	.text
 	.code64
@@ -33,14 +50,6 @@
 	leaq	efi_exit32(%rip), %rbx
 	subq	%rax, %rbx
 	movl	%ebx, 8(%rsp)
-	leaq	efi_gdt64(%rip), %rbx
-	subq	%rax, %rbx
-	movl	%ebx, 2(%ebx)
-	movl	%ebx, 4(%rsp)
-	leaq	efi_gdt32(%rip), %rbx
-	subq	%rax, %rbx
-	movl	%ebx, 2(%ebx)
-	movl	%ebx, (%rsp)
 
 	leaq	__efi64_thunk(%rip), %rbx
 	subq	%rax, %rbx
@@ -52,14 +61,92 @@
 	retq
 ENDPROC(efi64_thunk)
 
-	.data
-efi_gdt32:
-	.word 	efi_gdt32_end - efi_gdt32
-	.long	0			/* Filled out above */
-	.word	0
-	.quad	0x0000000000000000	/* NULL descriptor */
-	.quad	0x00cf9a000000ffff	/* __KERNEL_CS */
-	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
-efi_gdt32_end:
+/*
+ * We run this function from the 1:1 mapping.
+ *
+ * This function must be invoked with a 1:1 mapped stack.
+ */
+ENTRY(__efi64_thunk)
+	movl	%ds, %eax
+	push	%rax
+	movl	%es, %eax
+	push	%rax
+	movl	%ss, %eax
+	push	%rax
 
+	subq	$32, %rsp
+	movl	%esi, 0x0(%rsp)
+	movl	%edx, 0x4(%rsp)
+	movl	%ecx, 0x8(%rsp)
+	movq	%r8, %rsi
+	movl	%esi, 0xc(%rsp)
+	movq	%r9, %rsi
+	movl	%esi,  0x10(%rsp)
+
+	leaq	1f(%rip), %rbx
+	movq	%rbx, func_rt_ptr(%rip)
+
+	/* Switch to 32-bit descriptor */
+	pushq	$__KERNEL32_CS
+	leaq	efi_enter32(%rip), %rax
+	pushq	%rax
+	lretq
+
+1:	addq	$32, %rsp
+
+	pop	%rbx
+	movl	%ebx, %ss
+	pop	%rbx
+	movl	%ebx, %es
+	pop	%rbx
+	movl	%ebx, %ds
+
+	/*
+	 * Convert 32-bit status code into 64-bit.
+	 */
+	test	%rax, %rax
+	jz	1f
+	movl	%eax, %ecx
+	andl	$0x0fffffff, %ecx
+	andl	$0xf0000000, %eax
+	shl	$32, %rax
+	or	%rcx, %rax
+1:
+	ret
+ENDPROC(__efi64_thunk)
+
+ENTRY(efi_exit32)
+	movq	func_rt_ptr(%rip), %rax
+	push	%rax
+	mov	%rdi, %rax
+	ret
+ENDPROC(efi_exit32)
+
+	.code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+	movl	$__KERNEL_DS, %eax
+	movl	%eax, %ds
+	movl	%eax, %es
+	movl	%eax, %ss
+
+	call	*%edi
+
+	/* We must preserve return value */
+	movl	%eax, %edi
+
+	movl	72(%esp), %eax
+	pushl	$__KERNEL_CS
+	pushl	%eax
+
+	lret
+ENDPROC(efi_enter32)
+
+	.data
+	.balign	8
+func_rt_ptr:		.quad 0
 efi_saved_sp:		.quad 0
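
The thunk's epilogue converts a 32-bit EFI status into the 64-bit encoding by keeping the low 28 bits and moving the class nibble from bits 31:28 up to bits 63:60. A hedged C restatement of that assembly, not kernel code; the example value 0x80000009 is the 32-bit form of EFI_OUT_OF_RESOURCES:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the status conversion done at the end of __efi64_thunk:
     * 32-bit EFI status codes keep their error/class bits in the top
     * nibble, which a 64-bit caller expects in bits 63:60. */
    static uint64_t efi32_status_to_efi64(uint32_t status)
    {
        uint64_t code  = status & 0x0fffffff;                    /* low 28 bits unchanged */
        uint64_t class = (uint64_t)(status & 0xf0000000) << 32;  /* top nibble -> 63:60 */
        return class | code;
    }

    int main(void)
    {
        /* Prints 0x8000000000000009, the 64-bit form of the same status. */
        printf("0x%llx\n", (unsigned long long)efi32_status_to_efi64(0x80000009));
        return 0;
    }
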
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 1bbedc4..3005f0c 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -130,7 +130,7 @@
 		intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
 	else {
 		intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-		pr_info("ARCH: Uknown SoC, assuming PENWELL!\n");
+		pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
 	}
 
 out:
diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile
new file mode 100644
index 0000000..9cc57ed
--- /dev/null
+++ b/arch/x86/platform/intel-quark/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IMR) += imr.o
+obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
new file mode 100644
index 0000000..0ee619f
--- /dev/null
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -0,0 +1,661 @@
+/**
+ * imr.c
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR registers define an isolated region of memory that can
+ * be masked to prohibit certain system agents from accessing memory.
+ * When a device behind a masked port performs an access - snooped or
+ * not, an IMR may optionally prevent that transaction from changing
+ * the state of memory or from getting correct data in response to the
+ * operation.
+ *
+ * Write data will be dropped and reads will return 0xFFFFFFFF; the
+ * system will reset and the system BIOS will print out an error message
+ * to inform the user that an IMR has been violated.
+ *
+ * This code is based on the Linux MTRR code and reference code from
+ * Intel's Quark BSP EFI, Linux and grub code.
+ *
+ * See quark-x1000-datasheet.pdf for register definitions.
+ * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm-generic/sections.h>
+#include <asm/cpu_device_id.h>
+#include <asm/imr.h>
+#include <asm/iosf_mbi.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+struct imr_device {
+	struct dentry	*file;
+	bool		init;
+	struct mutex	lock;
+	int		max_imr;
+	int		reg_base;
+};
+
+static struct imr_device imr_dev;
+
+/*
+ * IMR read/write mask control registers.
+ * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
+ * bit definitions.
+ *
+ * addr_lo
+ * 31		Lock bit
+ * 30:24	Reserved
+ * 23:2		1 KiB aligned lo address
+ * 1:0		Reserved
+ *
+ * addr_hi
+ * 31:24	Reserved
+ * 23:2		1 KiB aligned hi address
+ * 1:0		Reserved
+ */
+#define IMR_LOCK	BIT(31)
+
+struct imr_regs {
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 rmask;
+	u32 wmask;
+};
+
+#define IMR_NUM_REGS	(sizeof(struct imr_regs)/sizeof(u32))
+#define IMR_SHIFT	8
+#define imr_to_phys(x)	((x) << IMR_SHIFT)
+#define phys_to_imr(x)	((x) >> IMR_SHIFT)
+
+/**
+ * imr_is_enabled - true if an IMR is enabled, false otherwise.
+ *
+ * Determines if an IMR is enabled based on address range and read/write
+ * mask. An IMR set with an address range set to zero and a read/write
+ * access mask set to all is considered to be disabled. An IMR in any
+ * other state - for example, an address range of zero but without
+ * read/write access set to all - is considered to be enabled. This
+ * definition of disabled is how firmware switches off an IMR and is
+ * maintained in the kernel for consistency.
+ *
+ * @imr:	pointer to IMR descriptor.
+ * @return:	true if the IMR is enabled, false if disabled.
+ */
+static inline int imr_is_enabled(struct imr_regs *imr)
+{
+	return !(imr->rmask == IMR_READ_ACCESS_ALL &&
+		 imr->wmask == IMR_WRITE_ACCESS_ALL &&
+		 imr_to_phys(imr->addr_lo) == 0 &&
+		 imr_to_phys(imr->addr_hi) == 0);
+}
+
+/**
+ * imr_read - read an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ *
+ * @idev:	pointer to imr_device structure.
+ * @imr_id:	IMR entry to read.
+ * @imr:	IMR structure representing address and access masks.
+ * @return:	0 on success or an error code passed back from iosf_mbi on failure.
+ */
+static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
+{
+	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+	int ret;
+
+	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+				reg++, &imr->addr_lo);
+	if (ret)
+		return ret;
+
+	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+				reg++, &imr->addr_hi);
+	if (ret)
+		return ret;
+
+	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+				reg++, &imr->rmask);
+	if (ret)
+		return ret;
+
+	return iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+				reg++, &imr->wmask);
+}
+
+/**
+ * imr_write - write an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ * Note lock bits need to be written independently of address bits.
+ *
+ * @idev:	pointer to imr_device structure.
+ * @imr_id:	IMR entry to write.
+ * @imr:	IMR structure representing address and access masks.
+ * @lock:	indicates if the IMR lock bit should be applied.
+ * @return:	0 on success or an error code passed back from iosf_mbi on failure.
+ */
+static int imr_write(struct imr_device *idev, u32 imr_id,
+		     struct imr_regs *imr, bool lock)
+{
+	unsigned long flags;
+	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+	int ret;
+
+	local_irq_save(flags);
+
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++,
+				imr->addr_lo);
+	if (ret)
+		goto failed;
+
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+				reg++, imr->addr_hi);
+	if (ret)
+		goto failed;
+
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+				reg++, imr->rmask);
+	if (ret)
+		goto failed;
+
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+				reg++, imr->wmask);
+	if (ret)
+		goto failed;
+
+	/* Lock bit must be set separately to addr_lo address bits. */
+	if (lock) {
+		imr->addr_lo |= IMR_LOCK;
+		ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+					reg - IMR_NUM_REGS, imr->addr_lo);
+		if (ret)
+			goto failed;
+	}
+
+	local_irq_restore(flags);
+	return 0;
+failed:
+	/*
+	 * If writing to the IOSF failed then we're in an unknown state,
+	 * likely a very bad state. An IMR in an invalid state will almost
+	 * certainly lead to a memory access violation.
+	 */
+	local_irq_restore(flags);
+	WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
+	     imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
+
+	return ret;
+}
+
+/**
+ * imr_dbgfs_state_show - print state of IMR registers.
+ *
+ * @s:		pointer to seq_file for output.
+ * @unused:	unused parameter.
+ * @return:	0 on success or an error code passed back from iosf_mbi on failure.
+ */
+static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
+{
+	phys_addr_t base;
+	phys_addr_t end;
+	int i;
+	struct imr_device *idev = s->private;
+	struct imr_regs imr;
+	size_t size;
+	int ret = -ENODEV;
+
+	mutex_lock(&idev->lock);
+
+	for (i = 0; i < idev->max_imr; i++) {
+
+		ret = imr_read(idev, i, &imr);
+		if (ret)
+			break;
+
+		/*
+		 * Remember to add IMR_ALIGN bytes to size to indicate the
+		 * inherent IMR_ALIGN size bytes contained in the masked away
+		 * lower ten bits.
+		 */
+		if (imr_is_enabled(&imr)) {
+			base = imr_to_phys(imr.addr_lo);
+			end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+		} else {
+			base = 0;
+			end = 0;
+		}
+		size = end - base;
+		seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
+			   "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
+			   &base, &end, size, imr.rmask, imr.wmask,
+			   imr_is_enabled(&imr) ? "enabled " : "disabled",
+			   imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
+	}
+
+	mutex_unlock(&idev->lock);
+	return ret;
+}
+
+/**
+ * imr_state_open - debugfs open callback.
+ *
+ * @inode:	pointer to struct inode.
+ * @file:	pointer to struct file.
+ * @return:	result of single open.
+ */
+static int imr_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, imr_dbgfs_state_show, inode->i_private);
+}
+
+static const struct file_operations imr_state_ops = {
+	.open		= imr_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/**
+ * imr_debugfs_register - register debugfs hooks.
+ *
+ * @idev:	pointer to imr_device structure.
+ * @return:	0 on success - errno on failure.
+ */
+static int imr_debugfs_register(struct imr_device *idev)
+{
+	idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL,
+					 idev, &imr_state_ops);
+	return PTR_ERR_OR_ZERO(idev->file);
+}
+
+/**
+ * imr_debugfs_unregister - unregister debugfs hooks.
+ *
+ * @idev:	pointer to imr_device structure.
+ * @return:
+ */
+static void imr_debugfs_unregister(struct imr_device *idev)
+{
+	debugfs_remove(idev->file);
+}
+
+/**
+ * imr_check_params - check that the passed address range is IMR aligned and non-zero in size.
+ *
+ * @base:	base address of intended IMR.
+ * @size:	size of intended IMR.
+ * @return:	zero on a valid range, -EINVAL on unaligned base/size or zero size.
+ */
+static int imr_check_params(phys_addr_t base, size_t size)
+{
+	if ((base & IMR_MASK) || (size & IMR_MASK)) {
+		pr_err("base %pa size 0x%08zx must align to 1KiB\n",
+			&base, size);
+		return -EINVAL;
+	}
+	if (size == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
+ *
+ * IMR addr_hi has a built-in offset of IMR_ALIGN (0x400) bytes on top of the
+ * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
+ * as a result.
+ *
+ * @size:	input size bytes.
+ * @return:	reduced size.
+ */
+static inline size_t imr_raw_size(size_t size)
+{
+	return size - IMR_ALIGN;
+}
+
+/**
+ * imr_address_overlap - detects an address overlap.
+ *
+ * @addr:	address to check against an existing IMR.
+ * @imr:	imr being checked.
+ * @return:	true for overlap, false for no overlap.
+ */
+static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
+{
+	return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
+}
+
+/**
+ * imr_add_range - add an Isolated Memory Region.
+ *
+ * @base:	physical base address of region aligned to 1KiB.
+ * @size:	physical size of region in bytes must be aligned to 1KiB.
+ * @rmask:	read access mask.
+ * @wmask:	write access mask.
+ * @lock:	indicates whether or not to permanently lock this region.
+ * @return:	zero on success or negative value indicating error.
+ */
+int imr_add_range(phys_addr_t base, size_t size,
+		  unsigned int rmask, unsigned int wmask, bool lock)
+{
+	phys_addr_t end;
+	unsigned int i;
+	struct imr_device *idev = &imr_dev;
+	struct imr_regs imr;
+	size_t raw_size;
+	int reg;
+	int ret;
+
+	if (WARN_ONCE(idev->init == false, "driver not initialized"))
+		return -ENODEV;
+
+	ret = imr_check_params(base, size);
+	if (ret)
+		return ret;
+
+	/* Tweak the size value. */
+	raw_size = imr_raw_size(size);
+	end = base + raw_size;
+
+	/*
+	 * Check for reserved IMR value common to firmware, kernel and grub
+	 * indicating a disabled IMR.
+	 */
+	imr.addr_lo = phys_to_imr(base);
+	imr.addr_hi = phys_to_imr(end);
+	imr.rmask = rmask;
+	imr.wmask = wmask;
+	if (!imr_is_enabled(&imr))
+		return -ENOTSUPP;
+
+	mutex_lock(&idev->lock);
+
+	/*
+	 * Find a free IMR while checking for an existing overlapping range.
+	 * Note there's no restriction in silicon to prevent IMR overlaps.
+	 * For the sake of simplicity and ease in defining/debugging an IMR
+	 * memory map we exclude IMR overlaps.
+	 */
+	reg = -1;
+	for (i = 0; i < idev->max_imr; i++) {
+		ret = imr_read(idev, i, &imr);
+		if (ret)
+			goto failed;
+
+		/* Find overlap @ base or end of requested range. */
+		ret = -EINVAL;
+		if (imr_is_enabled(&imr)) {
+			if (imr_address_overlap(base, &imr))
+				goto failed;
+			if (imr_address_overlap(end, &imr))
+				goto failed;
+		} else {
+			reg = i;
+		}
+	}
+
+	/* Error out if we have no free IMR entries. */
+	if (reg == -1) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
+		 reg, &base, &end, raw_size, rmask, wmask);
+
+	/* Enable IMR at specified range and access mask. */
+	imr.addr_lo = phys_to_imr(base);
+	imr.addr_hi = phys_to_imr(end);
+	imr.rmask = rmask;
+	imr.wmask = wmask;
+
+	ret = imr_write(idev, reg, &imr, lock);
+	if (ret < 0) {
+		/*
+		 * In the highly unlikely event iosf_mbi_write failed
+		 * attempt to rollback the IMR setup skipping the trapping
+		 * of further IOSF write failures.
+		 */
+		imr.addr_lo = 0;
+		imr.addr_hi = 0;
+		imr.rmask = IMR_READ_ACCESS_ALL;
+		imr.wmask = IMR_WRITE_ACCESS_ALL;
+		imr_write(idev, reg, &imr, false);
+	}
+failed:
+	mutex_unlock(&idev->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(imr_add_range);
+
+/**
+ * __imr_remove_range - delete an Isolated Memory Region.
+ *
+ * This function allows you to delete an IMR by its index specified by reg or
+ * by address range specified by base and size respectively. If you specify an
+ * index on its own, the base and size parameters are ignored.
+ * __imr_remove_range(0, base, size); deletes the IMR at index 0, base/size ignored.
+ * __imr_remove_range(-1, base, size); deletes the IMR spanning base to base+size.
+ *
+ * @reg:	imr index to remove.
+ * @base:	physical base address of region aligned to 1 KiB.
+ * @size:	physical size of region in bytes aligned to 1 KiB.
+ * @return:	-EINVAL on invalid range or out of range id
+ *		-ENODEV if reg is valid but no IMR exists or is locked
+ *		0 on success.
+ */
+static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
+{
+	phys_addr_t end;
+	bool found = false;
+	unsigned int i;
+	struct imr_device *idev = &imr_dev;
+	struct imr_regs imr;
+	size_t raw_size;
+	int ret = 0;
+
+	if (WARN_ONCE(idev->init == false, "driver not initialized"))
+		return -ENODEV;
+
+	/*
+	 * Validate address range if deleting by address, else we are
+	 * deleting by index where base and size will be ignored.
+	 */
+	if (reg == -1) {
+		ret = imr_check_params(base, size);
+		if (ret)
+			return ret;
+	}
+
+	/* Tweak the size value. */
+	raw_size = imr_raw_size(size);
+	end = base + raw_size;
+
+	mutex_lock(&idev->lock);
+
+	if (reg >= 0) {
+		/* If a specific IMR is given try to use it. */
+		ret = imr_read(idev, reg, &imr);
+		if (ret)
+			goto failed;
+
+		if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
+			ret = -ENODEV;
+			goto failed;
+		}
+		found = true;
+	} else {
+		/* Search for match based on address range. */
+		for (i = 0; i < idev->max_imr; i++) {
+			ret = imr_read(idev, i, &imr);
+			if (ret)
+				goto failed;
+
+			if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
+				continue;
+
+			if ((imr_to_phys(imr.addr_lo) == base) &&
+			    (imr_to_phys(imr.addr_hi) == end)) {
+				found = true;
+				reg = i;
+				break;
+			}
+		}
+	}
+
+	if (!found) {
+		ret = -ENODEV;
+		goto failed;
+	}
+
+	pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
+
+	/* Tear down the IMR. */
+	imr.addr_lo = 0;
+	imr.addr_hi = 0;
+	imr.rmask = IMR_READ_ACCESS_ALL;
+	imr.wmask = IMR_WRITE_ACCESS_ALL;
+
+	ret = imr_write(idev, reg, &imr, false);
+
+failed:
+	mutex_unlock(&idev->lock);
+	return ret;
+}
+
+/**
+ * imr_remove_range - delete an Isolated Memory Region by address
+ *
+ * This function allows you to delete an IMR by an address range specified
+ * by base and size respectively.
+ * imr_remove_range(base, size); delete IMR from base to base+size.
+ *
+ * @base:	physical base address of region aligned to 1 KiB.
+ * @size:	physical size of region in bytes aligned to 1 KiB.
+ * @return:	-EINVAL on an invalid or unaligned range
+ *		-ENODEV if no matching IMR exists or it is locked
+ *		0 on success.
+ */
+int imr_remove_range(phys_addr_t base, size_t size)
+{
+	return __imr_remove_range(-1, base, size);
+}
+EXPORT_SYMBOL_GPL(imr_remove_range);
+
+/**
+ * imr_clear - delete an Isolated Memory Region by index
+ *
+ * This function allows you to delete an IMR by its index. Useful for
+ * initial sanitization of the IMR address map when tearing down the
+ * boot-time IMRs.
+ * imr_clear(reg); deletes the IMR at index reg.
+ *
+ * @reg:	imr index to remove.
+ * @return:	-EINVAL on invalid range or out of range id
+ *		-ENODEV if reg is valid but no IMR exists or is locked
+ *		0 on success.
+ */
+static inline int imr_clear(int reg)
+{
+	return __imr_remove_range(reg, 0, 0);
+}
+
+/**
+ * imr_fixup_memmap - Tear down IMRs used during bootup.
+ *
+ * BIOS and Grub both set up IMRs around the compressed kernel and initrd
+ * memory that need to be removed before the kernel hands out one of the
+ * IMR encased addresses to a downstream DMA agent such as the SD or
+ * Ethernet controller. IMRs on Galileo are set up to immediately reset
+ * the system on violation. As a result, if you're running a root
+ * filesystem from SD you'll need the boot-time IMRs torn down or you'll
+ * find seemingly random resets when using your filesystem.
+ *
+ * @idev:	pointer to imr_device structure.
+ * @return:
+ */
+static void __init imr_fixup_memmap(struct imr_device *idev)
+{
+	phys_addr_t base = virt_to_phys(&_text);
+	size_t size = virt_to_phys(&__end_rodata) - base;
+	int i;
+	int ret;
+
+	/* Tear down all existing unlocked IMRs. */
+	for (i = 0; i < idev->max_imr; i++)
+		imr_clear(i);
+
+	/*
+	 * Setup a locked IMR around the physical extent of the kernel
+	 * from the beginning of the .text section to the end of the
+	 * .rodata section as one physically contiguous block.
+	 */
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+	if (ret < 0) {
+		pr_err("unable to setup IMR for kernel: (%p - %p)\n",
+			&_text, &__end_rodata);
+	} else {
+		pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
+			size / 1024, &_text, &__end_rodata);
+	}
+
+}
+
+static const struct x86_cpu_id imr_ids[] __initconst = {
+	{ X86_VENDOR_INTEL, 5, 9 },	/* Intel Quark SoC X1000. */
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, imr_ids);
+
+/**
+ * imr_init - entry point for IMR driver.
+ *
+ * return: -ENODEV for no IMR support, 0 if good to go.
+ */
+static int __init imr_init(void)
+{
+	struct imr_device *idev = &imr_dev;
+	int ret;
+
+	if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
+		return -ENODEV;
+
+	idev->max_imr = QUARK_X1000_IMR_MAX;
+	idev->reg_base = QUARK_X1000_IMR_REGBASE;
+	idev->init = true;
+
+	mutex_init(&idev->lock);
+	ret = imr_debugfs_register(idev);
+	if (ret != 0)
+		pr_warn("debugfs register failed!\n");
+	imr_fixup_memmap(idev);
+	return 0;
+}
+
+/**
+ * imr_exit - exit point for IMR code.
+ *
+ * Deregisters debugfs, leaves IMR state as-is.
+ *
+ * return:
+ */
+static void __exit imr_exit(void)
+{
+	imr_debugfs_unregister(&imr_dev);
+}
+
+module_init(imr_init);
+module_exit(imr_exit);
+
+MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
+MODULE_DESCRIPTION("Intel Isolated Memory Region driver");
+MODULE_LICENSE("Dual BSD/GPL");
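
The IMR registers above store addresses right-shifted by IMR_SHIFT, and the hardware treats addr_hi as covering one more IMR_ALIGN-sized block, which is why the error path prints imr_to_phys(addr_hi) + IMR_MASK and imr_raw_size() subtracts IMR_ALIGN. A minimal userspace sketch of that encoding; IMR_ALIGN (0x400) and IMR_MASK (IMR_ALIGN - 1) are assumed values from asm/imr.h, which is not part of this patch:

    #include <stdio.h>

    #define IMR_SHIFT       8
    #define IMR_ALIGN       0x400                 /* assumed: 1 KiB granularity */
    #define IMR_MASK        (IMR_ALIGN - 1)       /* assumed: low 10 address bits */
    #define imr_to_phys(x)  ((unsigned long)(x) << IMR_SHIFT)
    #define phys_to_imr(x)  ((unsigned long)(x) >> IMR_SHIFT)

    int main(void)
    {
        unsigned long base = 0x100000;            /* 1 MiB, 1 KiB aligned */
        unsigned long size = 0x2000;              /* 8 KiB */
        unsigned long raw  = size - IMR_ALIGN;    /* imr_raw_size(): addr_hi adds IMR_ALIGN back */

        unsigned long addr_lo = phys_to_imr(base);
        unsigned long addr_hi = phys_to_imr(base + raw);

        printf("addr_lo=0x%lx addr_hi=0x%lx\n", addr_lo, addr_hi);
        /* Decoded inclusive range: 0x100000 - 0x101fff, i.e. the full 8 KiB. */
        printf("decoded range: 0x%lx - 0x%lx\n",
               imr_to_phys(addr_lo), imr_to_phys(addr_hi) + IMR_MASK);
        return 0;
    }
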
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
new file mode 100644
index 0000000..c9a0838
--- /dev/null
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -0,0 +1,129 @@
+/**
+ * imr_selftest.c
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR self test. The purpose of this module is to run a set of tests on the
+ * IMR API to validate its sanity. We check for overlapping, reserved
+ * addresses and setup/teardown sanity.
+ *
+ */
+
+#include <asm-generic/sections.h>
+#include <asm/imr.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define SELFTEST KBUILD_MODNAME ": "
+/**
+ * imr_self_test_result - Print result string for self test.
+ *
+ * @res:	result code - true if test passed, false otherwise.
+ * @fmt:	format string.
+ * ...		variadic argument list.
+ */
+static void __init imr_self_test_result(int res, const char *fmt, ...)
+{
+	va_list vlist;
+
+	/* Print pass/fail. */
+	if (res)
+		pr_info(SELFTEST "pass ");
+	else
+		pr_info(SELFTEST "fail ");
+
+	/* Print variable string. */
+	va_start(vlist, fmt);
+	vprintk(fmt, vlist);
+	va_end(vlist);
+
+	/* Optional warning. */
+	WARN(res == 0, "test failed");
+}
+#undef SELFTEST
+
+/**
+ * imr_self_test
+ *
+ * Run some simple IMR self tests covering overlap, zero sized
+ * allocations and 1 KiB sized areas.
+ *
+ */
+static void __init imr_self_test(void)
+{
+	phys_addr_t base  = virt_to_phys(&_text);
+	size_t size = virt_to_phys(&__end_rodata) - base;
+	const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
+	int ret;
+
+	/* Test zero zero. */
+	ret = imr_add_range(0, 0, 0, 0, false);
+	imr_self_test_result(ret < 0, "zero sized IMR\n");
+
+	/* Test exact overlap. */
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+	/* Test overlap with base inside of existing. */
+	base += size - IMR_ALIGN;
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+	/* Test overlap with end inside of existing. */
+	base -= size + IMR_ALIGN * 2;
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+	/* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
+	ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
+			    IMR_WRITE_ACCESS_ALL, false);
+	imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
+
+	/* Test that a 1 KiB IMR @ zero with CPU only will work. */
+	ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false);
+	imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
+	if (ret >= 0) {
+		ret = imr_remove_range(0, IMR_ALIGN);
+		imr_self_test_result(ret == 0, "teardown - cpu-access\n");
+	}
+
+	/* Test 2 KiB works. */
+	size = IMR_ALIGN * 2;
+	ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL,
+			    IMR_WRITE_ACCESS_ALL, false);
+	imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
+	if (ret >= 0) {
+		ret = imr_remove_range(0, size);
+		imr_self_test_result(ret == 0, "teardown 2KiB\n");
+	}
+}
+
+/**
+ * imr_self_test_init - entry point for the IMR self test.
+ *
+ * return: 0 on success.
+ */
+static int __init imr_self_test_init(void)
+{
+	imr_self_test();
+	return 0;
+}
+
+/**
+ * imr_self_test_exit - exit point for the IMR self test.
+ *
+ * return:
+ */
+static void __exit imr_self_test_exit(void)
+{
+}
+
+module_init(imr_self_test_init);
+module_exit(imr_self_test_exit);
+
+MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
+MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
index 31776d0..d7ec4e2 100644
--- a/arch/x86/vdso/vdso32/sigreturn.S
+++ b/arch/x86/vdso/vdso32/sigreturn.S
@@ -17,6 +17,7 @@
 	.text
 	.globl __kernel_sigreturn
 	.type __kernel_sigreturn,@function
+	nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
 	ALIGN
 __kernel_sigreturn:
 .LSTART_sigreturn:
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bd8b845..5240f56 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1070,6 +1070,23 @@
 	BUG_ON(val);
 }
 #endif
+
+static u64 xen_read_msr_safe(unsigned int msr, int *err)
+{
+	u64 val;
+
+	val = native_read_msr_safe(msr, err);
+	switch (msr) {
+	case MSR_IA32_APICBASE:
+#ifdef CONFIG_X86_X2APIC
+		if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
+#endif
+			val &= ~X2APIC_ENABLE;
+		break;
+	}
+	return val;
+}
+
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
 	int ret;
@@ -1240,7 +1257,7 @@
 
 	.wbinvd = native_wbinvd,
 
-	.read_msr = native_read_msr_safe,
+	.read_msr = xen_read_msr_safe,
 	.write_msr = xen_write_msr_safe,
 
 	.read_tsc = native_read_tsc,
@@ -1741,6 +1758,7 @@
 #ifdef CONFIG_X86_32
 	i386_start_kernel();
 #else
+	cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
 	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
 #endif
 }
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 740ae30..9f93af5 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -563,7 +563,7 @@
 		if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
 			p2m_init(p2m);
 		else
-			p2m_init_identity(p2m, pfn);
+			p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));
 
 		spin_lock_irqsave(&p2m_update_lock, flags);
 
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 23b45eb..956374c 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -41,7 +41,7 @@
 static inline void check_zero(void)
 {
 	u8 ret;
-	u8 old = ACCESS_ONCE(zero_stats);
+	u8 old = READ_ONCE(zero_stats);
 	if (unlikely(old)) {
 		ret = cmpxchg(&zero_stats, old, 0);
 		/* This ensures only one fellow resets the stat */
@@ -112,6 +112,7 @@
 	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
 	int cpu = smp_processor_id();
 	u64 start;
+	__ticket_t head;
 	unsigned long flags;
 
 	/* If kicker interrupts not initialized yet, just spin */
@@ -159,11 +160,15 @@
 	 */
 	__ticket_enter_slowpath(lock);
 
+	/* make sure enter_slowpath, which is atomic, does not cross the read */
+	smp_mb__after_atomic();
+
 	/*
 	 * check again make sure it didn't become free while
 	 * we weren't looking
 	 */
-	if (ACCESS_ONCE(lock->tickets.head) == want) {
+	head = READ_ONCE(lock->tickets.head);
+	if (__tickets_equal(head, want)) {
 		add_stats(TAKEN_SLOW_PICKUP, 1);
 		goto out;
 	}
@@ -204,8 +209,8 @@
 		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
 
 		/* Make sure we read lock before want */
-		if (ACCESS_ONCE(w->lock) == lock &&
-		    ACCESS_ONCE(w->want) == next) {
+		if (READ_ONCE(w->lock) == lock &&
+		    READ_ONCE(w->want) == next) {
 			add_stats(RELEASED_SLOW_KICKED, 1);
 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
 			break;
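
The spinlock changes above replace ACCESS_ONCE() with READ_ONCE() for the lock-word loads; both force the compiler to emit the load where it is written instead of caching or refetching it at will. A simplified userspace stand-in for the scalar case, not the kernel's actual implementation:

    #include <stdio.h>

    /* Simplified stand-in for READ_ONCE(): a volatile access forces the
     * compiler to emit a fresh load each time the macro is used. */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    static int flag;

    int main(void)
    {
        flag = 1;
        /* Without the volatile access the compiler could hoist the load of
         * 'flag' out of a polling loop; with READ_ONCE() each iteration
         * re-reads memory. */
        while (!READ_ONCE(flag))
            ;
        printf("flag=%d\n", READ_ONCE(flag));
        return 0;
    }
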
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 9273d09..5b9c6d5 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1292,6 +1292,9 @@
 	struct blkg_rwstat rwstat = { }, tmp;
 	int i, cpu;
 
+	if (tg->stats_cpu == NULL)
+		return 0;
+
 	for_each_possible_cpu(cpu) {
 		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 02e835f..37fb190 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -65,6 +65,7 @@
 
 struct lpss_device_desc {
 	unsigned int flags;
+	const char *clk_con_id;
 	unsigned int prv_offset;
 	size_t prv_size_override;
 	void (*setup)(struct lpss_private_data *pdata);
@@ -105,7 +106,7 @@
 	}
 }
 
-static void byt_i2c_setup(struct lpss_private_data *pdata)
+static void lpss_deassert_reset(struct lpss_private_data *pdata)
 {
 	unsigned int offset;
 	u32 val;
@@ -114,9 +115,18 @@
 	val = readl(pdata->mmio_base + offset);
 	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
 	writel(val, pdata->mmio_base + offset);
+}
+
+#define LPSS_I2C_ENABLE			0x6c
+
+static void byt_i2c_setup(struct lpss_private_data *pdata)
+{
+	lpss_deassert_reset(pdata);
 
 	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
 		pdata->fixed_clk_rate = 133000000;
+
+	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
 }
 
 static struct lpss_device_desc lpt_dev_desc = {
@@ -125,12 +135,13 @@
 };
 
 static struct lpss_device_desc lpt_i2c_dev_desc = {
-	.flags = LPSS_CLK | LPSS_LTR,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
 	.prv_offset = 0x800,
 };
 
 static struct lpss_device_desc lpt_uart_dev_desc = {
 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
+	.clk_con_id = "baudclk",
 	.prv_offset = 0x800,
 	.setup = lpss_uart_setup,
 };
@@ -147,6 +158,7 @@
 
 static struct lpss_device_desc byt_uart_dev_desc = {
 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+	.clk_con_id = "baudclk",
 	.prv_offset = 0x800,
 	.setup = lpss_uart_setup,
 };
@@ -166,6 +178,12 @@
 	.setup = byt_i2c_setup,
 };
 
+static struct lpss_device_desc bsw_spi_dev_desc = {
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+	.prv_offset = 0x400,
+	.setup = lpss_deassert_reset,
+};
+
 #else
 
 #define LPSS_ADDR(desc) (0UL)
@@ -198,7 +216,7 @@
 	/* Braswell LPSS devices */
 	{ "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
 	{ "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
-	{ "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
+	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
 	{ "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
 
 	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
@@ -298,7 +316,7 @@
 		return PTR_ERR(clk);
 
 	pdata->clk = clk;
-	clk_register_clkdev(clk, NULL, devname);
+	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
 	return 0;
 }
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 982b67f..a8dd2f7 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -680,7 +680,7 @@
 		/* Enable GPE for event processing (SCI_EVT=1) */
 		if (!resuming)
 			acpi_ec_submit_request(ec);
-		pr_info("+++++ EC started +++++\n");
+		pr_debug("EC started\n");
 	}
 	spin_unlock_irqrestore(&ec->lock, flags);
 }
@@ -712,7 +712,7 @@
 			acpi_ec_complete_request(ec);
 		clear_bit(EC_FLAGS_STARTED, &ec->flags);
 		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
-		pr_info("+++++ EC stopped +++++\n");
+		pr_debug("EC stopped\n");
 	}
 	spin_unlock_irqrestore(&ec->lock, flags);
 }
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 4752b99..5589a6e 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -42,11 +42,13 @@
 	 * CHECKME: len might be required to check versus a minimum
 	 * length as well. 1 for io is fine, but for memory it does
 	 * not make any sense at all.
+	 * Note: some BIOSes report incorrect length for ACPI address space
+	 * descriptor, so remove check of 'reslen == len' to avoid regression.
 	 */
-	if (len && reslen && reslen == len && start <= end)
+	if (len && reslen && start <= end)
 		return true;
 
-	pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
+	pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
 		io ? "io" : "mem", start, end, len);
 
 	return false;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 88a4f99..26eb70c 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -540,6 +540,15 @@
 		DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
 		},
 	},
+	{
+	 /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */
+	 .callback = video_disable_native_backlight,
+	 .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "900X3C/900X3D/900X3E/900X4C/900X4D"),
+		},
+	},
 
 	{
 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
@@ -2101,7 +2110,8 @@
 
 int acpi_video_register(void)
 {
-	int result = 0;
+	int ret;
+
 	if (register_count) {
 		/*
 		 * if the function of acpi_video_register is already called,
@@ -2113,9 +2123,9 @@
 	mutex_init(&video_list_lock);
 	INIT_LIST_HEAD(&video_bus_head);
 
-	result = acpi_bus_register_driver(&acpi_video_bus);
-	if (result < 0)
-		return -ENODEV;
+	ret = acpi_bus_register_driver(&acpi_video_bus);
+	if (ret)
+		return ret;
 
 	/*
 	 * When the acpi_video_bus is loaded successfully, increase
@@ -2167,6 +2177,17 @@
 
 static int __init acpi_video_init(void)
 {
+	/*
+	 * Let the module load even if ACPI is disabled (e.g. due to
+	 * a broken BIOS) so that i915.ko can still be loaded on such
+	 * old systems without an AcpiOpRegion.
+	 *
+	 * acpi_video_register() will report -ENODEV later as well due
+	 * to acpi_disabled when i915.ko tries to register itself afterwards.
+	 */
+	if (acpi_disabled)
+		return 0;
+
 	dmi_check_system(video_dmi_table);
 
 	if (intel_opregion_present())
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 33b09b6..6607f3c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -551,7 +551,6 @@
 {
 	void *page_addr;
 	unsigned long user_page_addr;
-	struct vm_struct tmp_area;
 	struct page **page;
 	struct mm_struct *mm;
 
@@ -600,10 +599,11 @@
 				proc->pid, page_addr);
 			goto err_alloc_page_failed;
 		}
-		tmp_area.addr = page_addr;
-		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-		ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
-		if (ret) {
+		ret = map_kernel_range_noflush((unsigned long)page_addr,
+					PAGE_SIZE, PAGE_KERNEL, page);
+		flush_cache_vmap((unsigned long)page_addr,
+				(unsigned long)page_addr + PAGE_SIZE);
+		if (ret != 1) {
 			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
 			       proc->pid, page_addr);
 			goto err_map_kernel_failed;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index f9054cd..5389579 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -869,6 +869,8 @@
 	 */
 	ata_msleep(ap, 1);
 
+	sata_set_spd(link);
+
 	/*
 	 * Now, bring the host controller online again, this can take time
 	 * as PHY reset and communication establishment, 1st D2H FIS and
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ba4abbe..45937f8 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2242,7 +2242,7 @@
 }
 
 static int pm_genpd_summary_one(struct seq_file *s,
-		struct generic_pm_domain *gpd)
+				struct generic_pm_domain *genpd)
 {
 	static const char * const status_lookup[] = {
 		[GPD_STATE_ACTIVE] = "on",
@@ -2256,26 +2256,26 @@
 	struct gpd_link *link;
 	int ret;
 
-	ret = mutex_lock_interruptible(&gpd->lock);
+	ret = mutex_lock_interruptible(&genpd->lock);
 	if (ret)
 		return -ERESTARTSYS;
 
-	if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
+	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
 		goto exit;
-	seq_printf(s, "%-30s  %-15s  ", gpd->name, status_lookup[gpd->status]);
+	seq_printf(s, "%-30s  %-15s  ", genpd->name, status_lookup[genpd->status]);
 
 	/*
 	 * Modifications on the list require holding locks on both
 	 * master and slave, so we are safe.
-	 * Also gpd->name is immutable.
+	 * Also genpd->name is immutable.
 	 */
-	list_for_each_entry(link, &gpd->master_links, master_node) {
+	list_for_each_entry(link, &genpd->master_links, master_node) {
 		seq_printf(s, "%s", link->slave->name);
-		if (!list_is_last(&link->master_node, &gpd->master_links))
+		if (!list_is_last(&link->master_node, &genpd->master_links))
 			seq_puts(s, ", ");
 	}
 
-	list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
+	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
 		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
 		if (kobj_path == NULL)
 			continue;
@@ -2287,14 +2287,14 @@
 
 	seq_puts(s, "\n");
 exit:
-	mutex_unlock(&gpd->lock);
+	mutex_unlock(&genpd->lock);
 
 	return 0;
 }
 
 static int pm_genpd_summary_show(struct seq_file *s, void *data)
 {
-	struct generic_pm_domain *gpd;
+	struct generic_pm_domain *genpd;
 	int ret = 0;
 
 	seq_puts(s, "    domain                      status         slaves\n");
@@ -2305,8 +2305,8 @@
 	if (ret)
 		return -ERESTARTSYS;
 
-	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
-		ret = pm_genpd_summary_one(s, gpd);
+	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+		ret = pm_genpd_summary_one(s, genpd);
 		if (ret)
 			break;
 	}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index c2744b3..aab7158 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -730,6 +730,7 @@
 	pm_abort_suspend = true;
 	freeze_wake();
 }
+EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
 void pm_wakeup_clear(void)
 {
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index d453a2c..81751a4 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -307,7 +307,7 @@
 	if (pos == 0) {
 		memmove(blk + offset * map->cache_word_size,
 			blk, rbnode->blklen * map->cache_word_size);
-		bitmap_shift_right(present, present, offset, blklen);
+		bitmap_shift_left(present, present, offset, blklen);
 	}
 
 	/* update the rbnode block, its size and the base register */
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f373c35..da84f54 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -608,7 +608,8 @@
 	for (i = start; i < end; i++) {
 		regtmp = block_base + (i * map->reg_stride);
 
-		if (!regcache_reg_present(cache_present, i))
+		if (!regcache_reg_present(cache_present, i) ||
+		    !regmap_writeable(map, regtmp))
 			continue;
 
 		val = regcache_get_val(map, block, i);
@@ -677,7 +678,8 @@
 	for (i = start; i < end; i++) {
 		regtmp = block_base + (i * map->reg_stride);
 
-		if (!regcache_reg_present(cache_present, i)) {
+		if (!regcache_reg_present(cache_present, i) ||
+		    !regmap_writeable(map, regtmp)) {
 			ret = regcache_sync_block_raw_flush(map, &data,
 							    base, regtmp);
 			if (ret != 0)
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 6299a50..a6c3f75 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -499,7 +499,8 @@
 		goto err_alloc;
 	}
 
-	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
+				   irq_flags | IRQF_ONESHOT,
 				   chip->name, d);
 	if (ret != 0) {
 		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index cbdfbbf..ceb32dd 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -37,17 +37,18 @@
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
+#define NVME_MINORS		(1U << MINORBITS)
 #define NVME_Q_DEPTH		1024
 #define NVME_AQ_DEPTH		64
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT		(admin_timeout * HZ)
 #define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
-#define IOD_TIMEOUT		(retry_time * HZ)
 
 static unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
@@ -57,10 +58,6 @@
 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
-static unsigned char retry_time = 30;
-module_param(retry_time, byte, 0644);
-MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
-
 static unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
@@ -68,6 +65,9 @@
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
+static int nvme_char_major;
+module_param(nvme_char_major, int, 0);
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -76,7 +76,8 @@
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
-static struct notifier_block nvme_nb;
+
+static struct class *nvme_class;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 static int nvme_process_cq(struct nvme_queue *nvmeq);
@@ -95,7 +96,6 @@
  * commands and one for I/O commands).
  */
 struct nvme_queue {
-	struct llist_node node;
 	struct device *q_dmadev;
 	struct nvme_dev *dev;
 	char irqname[24];	/* nvme4294967295-65535\0 */
@@ -482,6 +482,115 @@
 	}
 }
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+	if (be32_to_cpu(pi->ref_tag) == v)
+		pi->ref_tag = cpu_to_be32(p);
+}
+
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+	if (be32_to_cpu(pi->ref_tag) == p)
+		pi->ref_tag = cpu_to_be32(v);
+}
+
+/**
+ * nvme_dif_remap - remaps ref tags to bip seed and physical lba
+ *
+ * The virtual start sector is the one that was originally submitted by the
+ * block layer.	Due to partitioning, MD/DM cloning, etc. the actual physical
+ * start sector may be different. Remap protection information to match the
+ * physical LBA on writes, and back to the original seed on reads.
+ *
+ * Type 0 and 3 do not have a ref tag, so no remapping required.
+ */
+static void nvme_dif_remap(struct request *req,
+			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+	struct nvme_ns *ns = req->rq_disk->private_data;
+	struct bio_integrity_payload *bip;
+	struct t10_pi_tuple *pi;
+	void *p, *pmap;
+	u32 i, nlb, ts, phys, virt;
+
+	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
+		return;
+
+	bip = bio_integrity(req->bio);
+	if (!bip)
+		return;
+
+	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
+	if (!pmap)
+		return;
+
+	p = pmap;
+	virt = bip_get_seed(bip);
+	phys = nvme_block_nr(ns, blk_rq_pos(req));
+	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
+	ts = ns->disk->integrity->tuple_size;
+
+	for (i = 0; i < nlb; i++, virt++, phys++) {
+		pi = (struct t10_pi_tuple *)p;
+		dif_swap(phys, virt, pi);
+		p += ts;
+	}
+	kunmap_atomic(pmap);
+}
+
+static int nvme_noop_verify(struct blk_integrity_iter *iter)
+{
+	return 0;
+}
+
+static int nvme_noop_generate(struct blk_integrity_iter *iter)
+{
+	return 0;
+}
+
+struct blk_integrity nvme_meta_noop = {
+	.name			= "NVME_META_NOOP",
+	.generate_fn		= nvme_noop_generate,
+	.verify_fn		= nvme_noop_verify,
+};
+
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+	struct blk_integrity integrity;
+
+	switch (ns->pi_type) {
+	case NVME_NS_DPS_PI_TYPE3:
+		integrity = t10_pi_type3_crc;
+		break;
+	case NVME_NS_DPS_PI_TYPE1:
+	case NVME_NS_DPS_PI_TYPE2:
+		integrity = t10_pi_type1_crc;
+		break;
+	default:
+		integrity = nvme_meta_noop;
+		break;
+	}
+	integrity.tuple_size = ns->ms;
+	blk_integrity_register(ns->disk, &integrity);
+	blk_queue_max_integrity_segments(ns->queue, 1);
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static void nvme_dif_remap(struct request *req,
+			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+}
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+}
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+}
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+}
+#endif
+
 static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
@@ -512,9 +621,16 @@
 			"completing aborted command with status:%04x\n",
 			status);
 
-	if (iod->nents)
+	if (iod->nents) {
 		dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
 			rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		if (blk_integrity_rq(req)) {
+			if (!rq_data_dir(req))
+				nvme_dif_remap(req, nvme_dif_complete);
+			dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
+				rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		}
+	}
 	nvme_free_iod(nvmeq->dev, iod);
 
 	blk_mq_complete_request(req);
@@ -670,6 +786,24 @@
 	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+	if (blk_integrity_rq(req)) {
+		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+		switch (ns->pi_type) {
+		case NVME_NS_DPS_PI_TYPE3:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD;
+			break;
+		case NVME_NS_DPS_PI_TYPE1:
+		case NVME_NS_DPS_PI_TYPE2:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD |
+					NVME_RW_PRINFO_PRCHK_REF;
+			cmnd->rw.reftag = cpu_to_le32(
+					nvme_block_nr(ns, blk_rq_pos(req)));
+			break;
+		}
+	} else if (ns->ms)
+		control |= NVME_RW_PRINFO_PRACT;
+
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
@@ -690,6 +824,19 @@
 	struct nvme_iod *iod;
 	enum dma_data_direction dma_dir;
 
+	/*
+	 * If formatted with metadata, require the block layer provide a buffer
+	 * unless this namespace is formatted such that the metadata can be
+	 * stripped/generated by the controller with PRACT=1.
+	 */
+	if (ns->ms && !blk_integrity_rq(req)) {
+		if (!(ns->pi_type && ns->ms == 8)) {
+			req->errors = -EFAULT;
+			blk_mq_complete_request(req);
+			return BLK_MQ_RQ_QUEUE_OK;
+		}
+	}
+
 	iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
 	if (!iod)
 		return BLK_MQ_RQ_QUEUE_BUSY;
@@ -725,6 +872,21 @@
 					iod->nents, dma_dir);
 			goto retry_cmd;
 		}
+		if (blk_integrity_rq(req)) {
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+				goto error_cmd;
+
+			sg_init_table(iod->meta_sg, 1);
+			if (blk_rq_map_integrity_sg(
+					req->q, req->bio, iod->meta_sg) != 1)
+				goto error_cmd;
+
+			if (rq_data_dir(req))
+				nvme_dif_remap(req, nvme_dif_prep);
+
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+				goto error_cmd;
+		}
 	}
 
 	nvme_set_info(cmd, iod, req_completion);
@@ -817,14 +979,6 @@
 	return IRQ_WAKE_THREAD;
 }
 
-static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
-								cmd_info)
-{
-	spin_lock_irq(&nvmeq->q_lock);
-	cancel_cmd_info(cmd_info, NULL);
-	spin_unlock_irq(&nvmeq->q_lock);
-}
-
 struct sync_cmd_info {
 	struct task_struct *task;
 	u32 result;
@@ -847,7 +1001,6 @@
 static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
 						u32 *result, unsigned timeout)
 {
-	int ret;
 	struct sync_cmd_info cmdinfo;
 	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -859,29 +1012,12 @@
 
 	nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
 
-	set_current_state(TASK_KILLABLE);
-	ret = nvme_submit_cmd(nvmeq, cmd);
-	if (ret) {
-		nvme_finish_cmd(nvmeq, req->tag, NULL);
-		set_current_state(TASK_RUNNING);
-	}
-	ret = schedule_timeout(timeout);
-
-	/*
-	 * Ensure that sync_completion has either run, or that it will
-	 * never run.
-	 */
-	nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));
-
-	/*
-	 * We never got the completion
-	 */
-	if (cmdinfo.status == -EINTR)
-		return -EINTR;
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	nvme_submit_cmd(nvmeq, cmd);
+	schedule();
 
 	if (result)
 		*result = cmdinfo.result;
-
 	return cmdinfo.status;
 }
 
@@ -1158,29 +1294,18 @@
 	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = cmd->nvmeq;
 
+	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+							nvmeq->qid);
+	spin_lock_irq(&nvmeq->q_lock);
+	nvme_abort_req(req);
+	spin_unlock_irq(&nvmeq->q_lock);
+
 	/*
 	 * The aborted req will be completed on receiving the abort req.
 	 * We enable the timer again. If hit twice, it'll cause a device reset,
 	 * as the device then is in a faulty state.
 	 */
-	int ret = BLK_EH_RESET_TIMER;
-
-	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
-							nvmeq->qid);
-
-	spin_lock_irq(&nvmeq->q_lock);
-	if (!nvmeq->dev->initialized) {
-		/*
-		 * Force cancelled command frees the request, which requires we
-		 * return BLK_EH_NOT_HANDLED.
-		 */
-		nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
-		ret = BLK_EH_NOT_HANDLED;
-	} else
-		nvme_abort_req(req);
-	spin_unlock_irq(&nvmeq->q_lock);
-
-	return ret;
+	return BLK_EH_RESET_TIMER;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1233,7 +1358,6 @@
 	struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
 
 	spin_lock_irq(&nvmeq->q_lock);
-	nvme_process_cq(nvmeq);
 	if (hctx && hctx->tags)
 		blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
 	spin_unlock_irq(&nvmeq->q_lock);
@@ -1256,7 +1380,10 @@
 	}
 	if (!qid && dev->admin_q)
 		blk_mq_freeze_queue_start(dev->admin_q);
-	nvme_clear_queue(nvmeq);
+
+	spin_lock_irq(&nvmeq->q_lock);
+	nvme_process_cq(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
@@ -1875,13 +2002,24 @@
 	return 0;
 }
 
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+	u32 logical_block_size = queue_logical_block_size(ns->queue);
+	ns->queue->limits.discard_zeroes_data = 0;
+	ns->queue->limits.discard_alignment = logical_block_size;
+	ns->queue->limits.discard_granularity = logical_block_size;
+	ns->queue->limits.max_discard_sectors = 0xffffffff;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
 static int nvme_revalidate_disk(struct gendisk *disk)
 {
 	struct nvme_ns *ns = disk->private_data;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_id_ns *id;
 	dma_addr_t dma_addr;
-	int lbaf;
+	int lbaf, pi_type, old_ms;
+	unsigned short bs;
 
 	id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
 								GFP_KERNEL);
@@ -1890,16 +2028,51 @@
 								__func__);
 		return 0;
 	}
+	if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
+		dev_warn(&dev->pci_dev->dev,
+			"identify failed ns:%d, setting capacity to 0\n",
+			ns->ns_id);
+		memset(id, 0, sizeof(*id));
+	}
 
-	if (nvme_identify(dev, ns->ns_id, 0, dma_addr))
-		goto free;
-
-	lbaf = id->flbas & 0xf;
+	old_ms = ns->ms;
+	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
 	ns->lba_shift = id->lbaf[lbaf].ds;
+	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
 
-	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
- free:
+	/*
+	 * If identify namespace failed, use default 512 byte block size so
+	 * block layer can use before failing read/write for 0 capacity.
+	 */
+	if (ns->lba_shift == 0)
+		ns->lba_shift = 9;
+	bs = 1 << ns->lba_shift;
+
+	/* XXX: PI implementation requires metadata equal to t10 pi tuple size */
+	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
+					id->dps & NVME_NS_DPS_PI_MASK : 0;
+
+	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
+				ns->ms != old_ms ||
+				bs != queue_logical_block_size(disk->queue) ||
+				(ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
+		blk_integrity_unregister(disk);
+
+	ns->pi_type = pi_type;
+	blk_queue_logical_block_size(ns->queue, bs);
+
+	if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
+				!(id->flbas & NVME_NS_FLBAS_META_EXT))
+		nvme_init_integrity(ns);
+
+	if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
+		set_capacity(disk, 0);
+	else
+		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+	if (dev->oncs & NVME_CTRL_ONCS_DSM)
+		nvme_config_discard(ns);
+
 	dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
 	return 0;
 }
@@ -1923,8 +2096,7 @@
 		spin_lock(&dev_list_lock);
 		list_for_each_entry_safe(dev, next, &dev_list, node) {
 			int i;
-			if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
-							dev->initialized) {
+			if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
 				if (work_busy(&dev->reset_work))
 					continue;
 				list_del_init(&dev->node);
@@ -1956,30 +2128,16 @@
 	return 0;
 }
 
-static void nvme_config_discard(struct nvme_ns *ns)
-{
-	u32 logical_block_size = queue_logical_block_size(ns->queue);
-	ns->queue->limits.discard_zeroes_data = 0;
-	ns->queue->limits.discard_alignment = logical_block_size;
-	ns->queue->limits.discard_granularity = logical_block_size;
-	ns->queue->limits.max_discard_sectors = 0xffffffff;
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
-}
-
-static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
-			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 {
 	struct nvme_ns *ns;
 	struct gendisk *disk;
 	int node = dev_to_node(&dev->pci_dev->dev);
-	int lbaf;
-
-	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
-		return NULL;
 
 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
 	if (!ns)
-		return NULL;
+		return;
+
 	ns->queue = blk_mq_init_queue(&dev->tagset);
 	if (IS_ERR(ns->queue))
 		goto out_free_ns;
@@ -1995,9 +2153,9 @@
 
 	ns->ns_id = nsid;
 	ns->disk = disk;
-	lbaf = id->flbas & 0xf;
-	ns->lba_shift = id->lbaf[lbaf].ds;
-	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
+	list_add_tail(&ns->list, &dev->namespaces);
+
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	if (dev->max_hw_sectors)
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -2011,21 +2169,26 @@
 	disk->fops = &nvme_fops;
 	disk->private_data = ns;
 	disk->queue = ns->queue;
-	disk->driverfs_dev = &dev->pci_dev->dev;
+	disk->driverfs_dev = dev->device;
 	disk->flags = GENHD_FL_EXT_DEVT;
 	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
-	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
-	if (dev->oncs & NVME_CTRL_ONCS_DSM)
-		nvme_config_discard(ns);
-
-	return ns;
-
+	/*
+	 * Initialize capacity to 0 until we establish the namespace format and
+	 * set up integrity extensions if necessary. The revalidate_disk after
+	 * add_disk allows the driver to register with integrity if the format
+	 * requires it.
+	 */
+	set_capacity(disk, 0);
+	nvme_revalidate_disk(ns->disk);
+	add_disk(ns->disk);
+	if (ns->ms)
+		revalidate_disk(ns->disk);
+	return;
  out_free_queue:
 	blk_cleanup_queue(ns->queue);
  out_free_ns:
 	kfree(ns);
-	return NULL;
 }
 
 static void nvme_create_io_queues(struct nvme_dev *dev)
@@ -2150,22 +2313,20 @@
 	struct pci_dev *pdev = dev->pci_dev;
 	int res;
 	unsigned nn, i;
-	struct nvme_ns *ns;
 	struct nvme_id_ctrl *ctrl;
-	struct nvme_id_ns *id_ns;
 	void *mem;
 	dma_addr_t dma_addr;
 	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-	mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
+	mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
 	res = nvme_identify(dev, 0, 1, dma_addr);
 	if (res) {
 		dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
-		res = -EIO;
-		goto out;
+		dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+		return -EIO;
 	}
 
 	ctrl = mem;
@@ -2191,6 +2352,7 @@
 		} else
 			dev->max_hw_sectors = max_hw_sectors;
 	}
+	dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
 
 	dev->tagset.ops = &nvme_mq_ops;
 	dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2203,33 +2365,12 @@
 	dev->tagset.driver_data = dev;
 
 	if (blk_mq_alloc_tag_set(&dev->tagset))
-		goto out;
+		return 0;
 
-	id_ns = mem;
-	for (i = 1; i <= nn; i++) {
-		res = nvme_identify(dev, i, 0, dma_addr);
-		if (res)
-			continue;
+	for (i = 1; i <= nn; i++)
+		nvme_alloc_ns(dev, i);
 
-		if (id_ns->ncap == 0)
-			continue;
-
-		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-							dma_addr + 4096, NULL);
-		if (res)
-			memset(mem + 4096, 0, 4096);
-
-		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
-		if (ns)
-			list_add_tail(&ns->list, &dev->namespaces);
-	}
-	list_for_each_entry(ns, &dev->namespaces, list)
-		add_disk(ns->disk);
-	res = 0;
-
- out:
-	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
-	return res;
+	return 0;
 }
 
 static int nvme_dev_map(struct nvme_dev *dev)
@@ -2358,8 +2499,6 @@
 static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 {
 	struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
-
-	nvme_clear_queue(nvmeq);
 	nvme_put_dq(dq);
 }
 
@@ -2502,7 +2641,6 @@
 	int i;
 	u32 csts = -1;
 
-	dev->initialized = 0;
 	nvme_dev_list_remove(dev);
 
 	if (dev->bar) {
@@ -2513,7 +2651,6 @@
 		for (i = dev->queue_count - 1; i >= 0; i--) {
 			struct nvme_queue *nvmeq = dev->queues[i];
 			nvme_suspend_queue(nvmeq);
-			nvme_clear_queue(nvmeq);
 		}
 	} else {
 		nvme_disable_io_queues(dev);
@@ -2521,6 +2658,9 @@
 		nvme_disable_queue(dev, 0);
 	}
 	nvme_dev_unmap(dev);
+
+	for (i = dev->queue_count - 1; i >= 0; i--)
+		nvme_clear_queue(dev->queues[i]);
 }
 
 static void nvme_dev_remove(struct nvme_dev *dev)
@@ -2528,8 +2668,11 @@
 	struct nvme_ns *ns;
 
 	list_for_each_entry(ns, &dev->namespaces, list) {
-		if (ns->disk->flags & GENHD_FL_UP)
+		if (ns->disk->flags & GENHD_FL_UP) {
+			if (blk_get_integrity(ns->disk))
+				blk_integrity_unregister(ns->disk);
 			del_gendisk(ns->disk);
+		}
 		if (!blk_queue_dying(ns->queue)) {
 			blk_mq_abort_requeue_list(ns->queue);
 			blk_cleanup_queue(ns->queue);
@@ -2611,6 +2754,7 @@
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
 	pci_dev_put(dev->pci_dev);
+	put_device(dev->device);
 	nvme_free_namespaces(dev);
 	nvme_release_instance(dev);
 	blk_mq_free_tag_set(&dev->tagset);
@@ -2622,11 +2766,27 @@
 
 static int nvme_dev_open(struct inode *inode, struct file *f)
 {
-	struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
-								miscdev);
-	kref_get(&dev->kref);
-	f->private_data = dev;
-	return 0;
+	struct nvme_dev *dev;
+	int instance = iminor(inode);
+	int ret = -ENODEV;
+
+	spin_lock(&dev_list_lock);
+	list_for_each_entry(dev, &dev_list, node) {
+		if (dev->instance == instance) {
+			if (!dev->admin_q) {
+				ret = -EWOULDBLOCK;
+				break;
+			}
+			if (!kref_get_unless_zero(&dev->kref))
+				break;
+			f->private_data = dev;
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock(&dev_list_lock);
+
+	return ret;
 }
 
 static int nvme_dev_release(struct inode *inode, struct file *f)
@@ -2768,7 +2928,6 @@
 		nvme_unfreeze_queues(dev);
 		nvme_set_irq_hints(dev);
 	}
-	dev->initialized = 1;
 	return 0;
 }
 
@@ -2799,6 +2958,7 @@
 	dev->reset_workfn(work);
 }
 
+static void nvme_async_probe(struct work_struct *work);
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int node, result = -ENOMEM;
@@ -2834,37 +2994,20 @@
 		goto release;
 
 	kref_init(&dev->kref);
-	result = nvme_dev_start(dev);
-	if (result)
+	dev->device = device_create(nvme_class, &pdev->dev,
+				MKDEV(nvme_char_major, dev->instance),
+				dev, "nvme%d", dev->instance);
+	if (IS_ERR(dev->device)) {
+		result = PTR_ERR(dev->device);
 		goto release_pools;
+	}
+	get_device(dev->device);
 
-	if (dev->online_queues > 1)
-		result = nvme_dev_add(dev);
-	if (result)
-		goto shutdown;
-
-	scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
-	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
-	dev->miscdev.parent = &pdev->dev;
-	dev->miscdev.name = dev->name;
-	dev->miscdev.fops = &nvme_dev_fops;
-	result = misc_register(&dev->miscdev);
-	if (result)
-		goto remove;
-
-	nvme_set_irq_hints(dev);
-
-	dev->initialized = 1;
+	INIT_WORK(&dev->probe_work, nvme_async_probe);
+	schedule_work(&dev->probe_work);
 	return 0;
 
- remove:
-	nvme_dev_remove(dev);
-	nvme_dev_remove_admin(dev);
-	nvme_free_namespaces(dev);
- shutdown:
-	nvme_dev_shutdown(dev);
  release_pools:
-	nvme_free_queues(dev, 0);
 	nvme_release_prp_pools(dev);
  release:
 	nvme_release_instance(dev);
@@ -2877,6 +3020,29 @@
 	return result;
 }
 
+static void nvme_async_probe(struct work_struct *work)
+{
+	struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
+	int result;
+
+	result = nvme_dev_start(dev);
+	if (result)
+		goto reset;
+
+	if (dev->online_queues > 1)
+		result = nvme_dev_add(dev);
+	if (result)
+		goto reset;
+
+	nvme_set_irq_hints(dev);
+	return;
+ reset:
+	if (!work_busy(&dev->reset_work)) {
+		dev->reset_workfn = nvme_reset_failed_dev;
+		queue_work(nvme_workq, &dev->reset_work);
+	}
+}
+
 static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
@@ -2902,11 +3068,12 @@
 	spin_unlock(&dev_list_lock);
 
 	pci_set_drvdata(pdev, NULL);
+	flush_work(&dev->probe_work);
 	flush_work(&dev->reset_work);
-	misc_deregister(&dev->miscdev);
 	nvme_dev_shutdown(dev);
 	nvme_dev_remove(dev);
 	nvme_dev_remove_admin(dev);
+	device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
 	nvme_free_queues(dev, 0);
 	nvme_release_prp_pools(dev);
 	kref_put(&dev->kref, nvme_free_dev);
@@ -2990,11 +3157,26 @@
 	else if (result > 0)
 		nvme_major = result;
 
+	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
+							&nvme_dev_fops);
+	if (result < 0)
+		goto unregister_blkdev;
+	else if (result > 0)
+		nvme_char_major = result;
+
+	nvme_class = class_create(THIS_MODULE, "nvme");
+	if (!nvme_class)
+		goto unregister_chrdev;
+
 	result = pci_register_driver(&nvme_driver);
 	if (result)
-		goto unregister_blkdev;
+		goto destroy_class;
 	return 0;
 
+ destroy_class:
+	class_destroy(nvme_class);
+ unregister_chrdev:
+	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
  unregister_blkdev:
 	unregister_blkdev(nvme_major, "nvme");
  kill_workq:
@@ -3005,9 +3187,10 @@
 static void __exit nvme_exit(void)
 {
 	pci_unregister_driver(&nvme_driver);
-	unregister_hotcpu_notifier(&nvme_nb);
 	unregister_blkdev(nvme_major, "nvme");
 	destroy_workqueue(nvme_workq);
+	class_destroy(nvme_class);
+	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
 	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 	_nvme_check_size();
 }
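
The reworked revalidate path above derives the block size and protection type from the Identify Namespace data. Below is a minimal userspace sketch of that derivation; the NVMe field masks and the 8-byte T10 PI tuple size are assumed to match the values the driver uses, and the struct is a stripped-down stand-in for struct nvme_id_ns.

#include <stdint.h>
#include <stdio.h>

#define NVME_NS_FLBAS_LBA_MASK	0xf	/* low nibble selects the LBA format (assumed) */
#define NVME_NS_FLBAS_META_EXT	0x10	/* metadata transferred inline with data (assumed) */
#define NVME_NS_DPS_PI_MASK	0x7	/* protection information type (assumed) */
#define T10_PI_TUPLE_SIZE	8	/* sizeof(struct t10_pi_tuple) */

/* Stand-in for the Identify Namespace fields the driver reads. */
struct id_ns_stub {
	uint8_t  flbas;
	uint8_t  dps;
	uint8_t  lbaf_ds[16];
	uint16_t lbaf_ms[16];
};

int main(void)
{
	struct id_ns_stub id = { .flbas = 0x01, .dps = 0x01 };

	id.lbaf_ds[1] = 12;			/* 4096-byte data blocks */
	id.lbaf_ms[1] = T10_PI_TUPLE_SIZE;	/* 8 bytes of per-block metadata */

	int lbaf = id.flbas & NVME_NS_FLBAS_LBA_MASK;
	int lba_shift = id.lbaf_ds[lbaf];
	if (lba_shift == 0)
		lba_shift = 9;			/* fall back to 512-byte blocks */

	unsigned bs = 1u << lba_shift;
	unsigned ms = id.lbaf_ms[lbaf];
	int pi_type = (ms == T10_PI_TUPLE_SIZE) ?
			(id.dps & NVME_NS_DPS_PI_MASK) : 0;

	printf("block size %u, metadata %u, PI type %d, extended LBA %d\n",
	       bs, ms, pi_type, !!(id.flbas & NVME_NS_FLBAS_META_EXT));
	return 0;
}

When the metadata size does not match the PI tuple size, no integrity profile is registered and the capacity stays at 0, mirroring the checks in the hunk above.
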
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 5e78568..e10196e 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -779,10 +779,8 @@
 	struct nvme_dev *dev = ns->dev;
 	dma_addr_t dma_addr;
 	void *mem;
-	struct nvme_id_ctrl *id_ctrl;
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
-	u8 ieee[4];
 	int xfer_len;
 	__be32 tmp_id = cpu_to_be32(ns->ns_id);
 
@@ -793,46 +791,60 @@
 		goto out_dma;
 	}
 
-	/* nvme controller identify */
-	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		goto out_free;
-	if (nvme_sc) {
-		res = nvme_sc;
-		goto out_free;
-	}
-	id_ctrl = mem;
-
-	/* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
-	ieee[0] = id_ctrl->ieee[0] << 4;
-	ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
-	ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
-	ieee[3] = id_ctrl->ieee[2] >> 4;
-
-	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+	memset(inq_response, 0, alloc_len);
 	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
-	inq_response[3] = 20;      /* Page Length */
-	/* Designation Descriptor start */
-	inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
-	inq_response[5] = 0x03;    /* PIV=0b | Asso=00b | Designator Type=3h */
-	inq_response[6] = 0x00;    /* Rsvd */
-	inq_response[7] = 16;      /* Designator Length */
-	/* Designator start */
-	inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
-	inq_response[9] = ieee[2];        /* IEEE ID */
-	inq_response[10] = ieee[1];       /* IEEE ID */
-	inq_response[11] = ieee[0];       /* IEEE ID| Vendor Specific ID... */
-	inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
-	inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
-	inq_response[14] = dev->serial[0];
-	inq_response[15] = dev->serial[1];
-	inq_response[16] = dev->model[0];
-	inq_response[17] = dev->model[1];
-	memcpy(&inq_response[18], &tmp_id, sizeof(u32));
-	/* Last 2 bytes are zero */
+	if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
+		struct nvme_id_ns *id_ns = mem;
+		void *eui = id_ns->eui64;
+		int len = sizeof(id_ns->eui64);
 
-	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+		nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+		res = nvme_trans_status_code(hdr, nvme_sc);
+		if (res)
+			goto out_free;
+		if (nvme_sc) {
+			res = nvme_sc;
+			goto out_free;
+		}
+
+		if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
+			if (bitmap_empty(eui, len * 8)) {
+				eui = id_ns->nguid;
+				len = sizeof(id_ns->nguid);
+			}
+		}
+		if (bitmap_empty(eui, len * 8))
+			goto scsi_string;
+
+		inq_response[3] = 4 + len; /* Page Length */
+		/* Designation Descriptor start */
+		inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
+		inq_response[5] = 0x02;    /* PIV=0b | Asso=00b | Designator Type=2h */
+		inq_response[6] = 0x00;    /* Rsvd */
+		inq_response[7] = len;     /* Designator Length */
+		memcpy(&inq_response[8], eui, len);
+	} else {
+ scsi_string:
+		if (alloc_len < 72) {
+			res = nvme_trans_completion(hdr,
+					SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+			goto out_free;
+		}
+		inq_response[3] = 0x48;    /* Page Length */
+		/* Designation Descriptor start */
+		inq_response[4] = 0x03;    /* Proto ID=0h | Code set=3h */
+		inq_response[5] = 0x08;    /* PIV=0b | Asso=00b | Designator Type=8h */
+		inq_response[6] = 0x00;    /* Rsvd */
+		inq_response[7] = 0x44;    /* Designator Length */
+
+		sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
+		memcpy(&inq_response[12], dev->model, sizeof(dev->model));
+		sprintf(&inq_response[52], "%04x", tmp_id);
+		memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
+	}
+	xfer_len = alloc_len;
 	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
@@ -1600,7 +1612,7 @@
 		/* 10 Byte CDB */
 		*bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
 			parm_list[MODE_SELECT_10_BD_OFFSET + 1];
-		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &&
+		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
 				MODE_SELECT_10_LLBAA_MASK;
 	} else {
 		/* 6 Byte CDB */
@@ -2222,7 +2234,7 @@
 	page_code = GET_INQ_PAGE_CODE(cmd);
 	alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
 
-	inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
+	inq_response = kmalloc(alloc_len, GFP_KERNEL);
 	if (inq_response == NULL) {
 		res = -ENOMEM;
 		goto out_mem;
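
The MODE SELECT hunk above replaces a logical AND with a bitwise AND when testing the LLBAA bit. A short runnable illustration of the difference, assuming the 0x10 value of MODE_SELECT_10_LLBAA_MASK:

#include <stdio.h>

#define LLBAA_MASK	0x10	/* assumed value of MODE_SELECT_10_LLBAA_MASK */

int main(void)
{
	unsigned char parm = 0x01;	/* an unrelated bit set, LLBAA clear */

	int with_logical_and = parm && LLBAA_MASK;	/* 1: both operands are non-zero */
	int with_bitwise_and = parm & LLBAA_MASK;	/* 0: the LLBAA bit is not set */

	printf("&& gives %d, & gives %d\n", with_logical_and, with_bitwise_and);
	return 0;
}
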
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8e233ed..871bd35 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -528,7 +528,7 @@
 static inline void update_used_max(struct zram *zram,
 					const unsigned long pages)
 {
-	int old_max, cur_max;
+	unsigned long old_max, cur_max;
 
 	old_max = atomic_long_read(&zram->stats.max_used_pages);
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b876888..8bfc4c2 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -272,6 +272,7 @@
 	{ USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
 
 	/* Intel Bluetooth devices */
+	{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
 	{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
 	{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
 	{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index ec318bf..1786574 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -157,12 +157,16 @@
 {
 	struct ipmi_file_private *priv = file->private_data;
 	int                      rv;
+	struct ipmi_recv_msg *msg, *next;
 
 	rv = ipmi_destroy_user(priv->user);
 	if (rv)
 		return rv;
 
-	/* FIXME - free the messages in the list. */
+	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
+		ipmi_free_recv_msg(msg);
+
+
 	kfree(priv);
 
 	return 0;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 6b65fa4..9bb5928 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1483,14 +1483,10 @@
 	smi_msg->msgid = msgid;
 }
 
-static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
-		     struct ipmi_smi_msg *smi_msg, int priority)
+static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
+					     struct ipmi_smi_msg *smi_msg,
+					     int priority)
 {
-	int run_to_completion = intf->run_to_completion;
-	unsigned long flags;
-
-	if (!run_to_completion)
-		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
 	if (intf->curr_msg) {
 		if (priority > 0)
 			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
@@ -1500,8 +1496,25 @@
 	} else {
 		intf->curr_msg = smi_msg;
 	}
-	if (!run_to_completion)
+
+	return smi_msg;
+}
+
+
+static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
+		     struct ipmi_smi_msg *smi_msg, int priority)
+{
+	int run_to_completion = intf->run_to_completion;
+
+	if (run_to_completion) {
+		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+	} else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+	}
 
 	if (smi_msg)
 		handlers->sender(intf->send_info, smi_msg);
@@ -1985,7 +1998,9 @@
 	seq_printf(m, "%x", intf->channels[0].address);
 	for (i = 1; i < IPMI_MAX_CHANNELS; i++)
 		seq_printf(m, " %x", intf->channels[i].address);
-	return seq_putc(m, '\n');
+	seq_putc(m, '\n');
+
+	return seq_has_overflowed(m);
 }
 
 static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
@@ -2004,9 +2019,11 @@
 {
 	ipmi_smi_t intf = m->private;
 
-	return seq_printf(m, "%u.%u\n",
-		       ipmi_version_major(&intf->bmc->id),
-		       ipmi_version_minor(&intf->bmc->id));
+	seq_printf(m, "%u.%u\n",
+		   ipmi_version_major(&intf->bmc->id),
+		   ipmi_version_minor(&intf->bmc->id));
+
+	return seq_has_overflowed(m);
 }
 
 static int smi_version_proc_open(struct inode *inode, struct file *file)
@@ -2353,11 +2370,28 @@
 	&dev_attr_additional_device_support.attr,
 	&dev_attr_manufacturer_id.attr,
 	&dev_attr_product_id.attr,
+	&dev_attr_aux_firmware_revision.attr,
+	&dev_attr_guid.attr,
 	NULL
 };
 
+static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
+				       struct attribute *attr, int idx)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct bmc_device *bmc = to_bmc_device(dev);
+	umode_t mode = attr->mode;
+
+	if (attr == &dev_attr_aux_firmware_revision.attr)
+		return bmc->id.aux_firmware_revision_set ? mode : 0;
+	if (attr == &dev_attr_guid.attr)
+		return bmc->guid_set ? mode : 0;
+	return mode;
+}
+
 static struct attribute_group bmc_dev_attr_group = {
 	.attrs		= bmc_dev_attrs,
+	.is_visible	= bmc_dev_attr_is_visible,
 };
 
 static const struct attribute_group *bmc_dev_attr_groups[] = {
@@ -2380,13 +2414,6 @@
 {
 	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
 
-	if (bmc->id.aux_firmware_revision_set)
-		device_remove_file(&bmc->pdev.dev,
-				   &dev_attr_aux_firmware_revision);
-	if (bmc->guid_set)
-		device_remove_file(&bmc->pdev.dev,
-				   &dev_attr_guid);
-
 	platform_device_unregister(&bmc->pdev);
 }
 
@@ -2407,33 +2434,6 @@
 	mutex_unlock(&ipmidriver_mutex);
 }
 
-static int create_bmc_files(struct bmc_device *bmc)
-{
-	int err;
-
-	if (bmc->id.aux_firmware_revision_set) {
-		err = device_create_file(&bmc->pdev.dev,
-					 &dev_attr_aux_firmware_revision);
-		if (err)
-			goto out;
-	}
-	if (bmc->guid_set) {
-		err = device_create_file(&bmc->pdev.dev,
-					 &dev_attr_guid);
-		if (err)
-			goto out_aux_firm;
-	}
-
-	return 0;
-
-out_aux_firm:
-	if (bmc->id.aux_firmware_revision_set)
-		device_remove_file(&bmc->pdev.dev,
-				   &dev_attr_aux_firmware_revision);
-out:
-	return err;
-}
-
 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
 {
 	int               rv;
@@ -2522,15 +2522,6 @@
 			return rv;
 		}
 
-		rv = create_bmc_files(bmc);
-		if (rv) {
-			mutex_lock(&ipmidriver_mutex);
-			platform_device_unregister(&bmc->pdev);
-			mutex_unlock(&ipmidriver_mutex);
-
-			return rv;
-		}
-
 		dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
 			 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
 			 bmc->id.manufacturer_id,
@@ -4212,7 +4203,6 @@
 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
 
-/* FIXME - convert these to slabs. */
 static void free_smi_msg(struct ipmi_smi_msg *msg)
 {
 	atomic_dec(&smi_msg_inuse_count);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 967b73a..f6646ed 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -321,6 +321,18 @@
 static void cleanup_one_si(struct smi_info *to_clean);
 static void cleanup_ipmi_si(void);
 
+#ifdef DEBUG_TIMING
+void debug_timestamp(char *msg)
+{
+	struct timespec64 t;
+
+	getnstimeofday64(&t);
+	pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
+}
+#else
+#define debug_timestamp(x)
+#endif
+
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block *nb)
 {
@@ -358,9 +370,6 @@
 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 {
 	int              rv;
-#ifdef DEBUG_TIMING
-	struct timeval t;
-#endif
 
 	if (!smi_info->waiting_msg) {
 		smi_info->curr_msg = NULL;
@@ -370,10 +379,7 @@
 
 		smi_info->curr_msg = smi_info->waiting_msg;
 		smi_info->waiting_msg = NULL;
-#ifdef DEBUG_TIMING
-		do_gettimeofday(&t);
-		printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+		debug_timestamp("Start2");
 		err = atomic_notifier_call_chain(&xaction_notifier_list,
 				0, smi_info);
 		if (err & NOTIFY_STOP_MASK) {
@@ -582,12 +588,8 @@
 static void handle_transaction_done(struct smi_info *smi_info)
 {
 	struct ipmi_smi_msg *msg;
-#ifdef DEBUG_TIMING
-	struct timeval t;
 
-	do_gettimeofday(&t);
-	printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+	debug_timestamp("Done");
 	switch (smi_info->si_state) {
 	case SI_NORMAL:
 		if (!smi_info->curr_msg)
@@ -929,24 +931,15 @@
 	struct smi_info   *smi_info = send_info;
 	enum si_sm_result result;
 	unsigned long     flags;
-#ifdef DEBUG_TIMING
-	struct timeval    t;
-#endif
 
-	BUG_ON(smi_info->waiting_msg);
-	smi_info->waiting_msg = msg;
-
-#ifdef DEBUG_TIMING
-	do_gettimeofday(&t);
-	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+	debug_timestamp("Enqueue");
 
 	if (smi_info->run_to_completion) {
 		/*
 		 * If we are running to completion, start it and run
 		 * transactions until everything is clear.
 		 */
-		smi_info->curr_msg = smi_info->waiting_msg;
+		smi_info->curr_msg = msg;
 		smi_info->waiting_msg = NULL;
 
 		/*
@@ -964,6 +957,15 @@
 	}
 
 	spin_lock_irqsave(&smi_info->si_lock, flags);
+	/*
+	 * The following two lines don't need to be under the lock for
+	 * the lock's sake, but they do need SMP memory barriers to
+	 * avoid getting things out of order.  We are already claiming
+	 * the lock, anyway, so just do it under the lock to avoid the
+	 * ordering problem.
+	 */
+	BUG_ON(smi_info->waiting_msg);
+	smi_info->waiting_msg = msg;
 	check_start_timer_thread(smi_info);
 	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
@@ -989,18 +991,18 @@
  * we are spinning in kipmid looking for something and not delaying
  * between checks
  */
-static inline void ipmi_si_set_not_busy(struct timespec *ts)
+static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
 {
 	ts->tv_nsec = -1;
 }
-static inline int ipmi_si_is_busy(struct timespec *ts)
+static inline int ipmi_si_is_busy(struct timespec64 *ts)
 {
 	return ts->tv_nsec != -1;
 }
 
 static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
 					const struct smi_info *smi_info,
-					struct timespec *busy_until)
+					struct timespec64 *busy_until)
 {
 	unsigned int max_busy_us = 0;
 
@@ -1009,12 +1011,13 @@
 	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
 		ipmi_si_set_not_busy(busy_until);
 	else if (!ipmi_si_is_busy(busy_until)) {
-		getnstimeofday(busy_until);
-		timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+		getnstimeofday64(busy_until);
+		timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
 	} else {
-		struct timespec now;
-		getnstimeofday(&now);
-		if (unlikely(timespec_compare(&now, busy_until) > 0)) {
+		struct timespec64 now;
+
+		getnstimeofday64(&now);
+		if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
 			ipmi_si_set_not_busy(busy_until);
 			return 0;
 		}
@@ -1037,7 +1040,7 @@
 	struct smi_info *smi_info = data;
 	unsigned long flags;
 	enum si_sm_result smi_result;
-	struct timespec busy_until;
+	struct timespec64 busy_until;
 
 	ipmi_si_set_not_busy(&busy_until);
 	set_user_nice(current, MAX_NICE);
@@ -1128,15 +1131,10 @@
 	unsigned long     jiffies_now;
 	long              time_diff;
 	long		  timeout;
-#ifdef DEBUG_TIMING
-	struct timeval    t;
-#endif
 
 	spin_lock_irqsave(&(smi_info->si_lock), flags);
-#ifdef DEBUG_TIMING
-	do_gettimeofday(&t);
-	printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+	debug_timestamp("Timer");
+
 	jiffies_now = jiffies;
 	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
 		     * SI_USEC_PER_JIFFY);
@@ -1173,18 +1171,13 @@
 {
 	struct smi_info *smi_info = data;
 	unsigned long   flags;
-#ifdef DEBUG_TIMING
-	struct timeval  t;
-#endif
 
 	spin_lock_irqsave(&(smi_info->si_lock), flags);
 
 	smi_inc_stat(smi_info, interrupts);
 
-#ifdef DEBUG_TIMING
-	do_gettimeofday(&t);
-	printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+	debug_timestamp("Interrupt");
+
 	smi_event_handler(smi_info, 0);
 	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 	return IRQ_HANDLED;
@@ -2038,18 +2031,13 @@
 {
 	struct smi_info *smi_info = context;
 	unsigned long   flags;
-#ifdef DEBUG_TIMING
-	struct timeval t;
-#endif
 
 	spin_lock_irqsave(&(smi_info->si_lock), flags);
 
 	smi_inc_stat(smi_info, interrupts);
 
-#ifdef DEBUG_TIMING
-	do_gettimeofday(&t);
-	printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+	debug_timestamp("ACPI_GPE");
+
 	smi_event_handler(smi_info, 0);
 	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 
@@ -2071,7 +2059,6 @@
 	if (!info->irq)
 		return 0;
 
-	/* FIXME - is level triggered right? */
 	status = acpi_install_gpe_handler(NULL,
 					  info->irq,
 					  ACPI_GPE_LEVEL_TRIGGERED,
@@ -2998,7 +2985,9 @@
 {
 	struct smi_info *smi = m->private;
 
-	return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+	seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+
+	return seq_has_overflowed(m);
 }
 
 static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -3060,16 +3049,18 @@
 {
 	struct smi_info *smi = m->private;
 
-	return seq_printf(m,
-		       "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
-		       si_to_str[smi->si_type],
-		       addr_space_to_str[smi->io.addr_type],
-		       smi->io.addr_data,
-		       smi->io.regspacing,
-		       smi->io.regsize,
-		       smi->io.regshift,
-		       smi->irq,
-		       smi->slave_addr);
+	seq_printf(m,
+		   "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+		   si_to_str[smi->si_type],
+		   addr_space_to_str[smi->io.addr_type],
+		   smi->io.addr_data,
+		   smi->io.regspacing,
+		   smi->io.regsize,
+		   smi->io.regshift,
+		   smi->irq,
+		   smi->slave_addr);
+
+	return seq_has_overflowed(m);
 }
 
 static int smi_params_proc_open(struct inode *inode, struct file *file)
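
The timespec64 conversion above keeps kipmid's busy-wait bookkeeping: a window is armed on SI_SM_CALL_WITH_DELAY and cleared once the current time passes it. A userspace sketch of the same pattern, with clock_gettime() and struct timespec standing in for getnstimeofday64() and struct timespec64:

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000L
#define NSEC_PER_USEC	1000L

static void set_not_busy(struct timespec *ts)	{ ts->tv_nsec = -1; }
static int is_busy(const struct timespec *ts)	{ return ts->tv_nsec != -1; }

static void add_ns(struct timespec *ts, long ns)
{
	ts->tv_nsec += ns;
	while (ts->tv_nsec >= NSEC_PER_SEC) {
		ts->tv_nsec -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
}

static int ts_after(const struct timespec *a, const struct timespec *b)
{
	return a->tv_sec > b->tv_sec ||
	       (a->tv_sec == b->tv_sec && a->tv_nsec > b->tv_nsec);
}

int main(void)
{
	struct timespec busy_until, now;
	long max_busy_us = 100;		/* stands in for kipmid_max_busy_us */

	set_not_busy(&busy_until);

	/* A delay request arrives: arm the busy window once. */
	if (!is_busy(&busy_until)) {
		clock_gettime(CLOCK_MONOTONIC, &busy_until);
		add_ns(&busy_until, max_busy_us * NSEC_PER_USEC);
	}

	/* On a later poll, stop spinning once the window has expired. */
	clock_gettime(CLOCK_MONOTONIC, &now);
	if (ts_after(&now, &busy_until))
		set_not_busy(&busy_until);

	printf("still in the busy window: %s\n", is_busy(&busy_until) ? "yes" : "no");
	return 0;
}
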
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 982b963..f6e378d 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1097,8 +1097,6 @@
 	if (!ssif_info)
 		return 0;
 
-	i2c_set_clientdata(client, NULL);
-
 	/*
 	 * After this point, we won't deliver anything asynchronously
 	 * to the message handler.  We can unregister ourselves.
@@ -1198,7 +1196,9 @@
 
 static int smi_type_proc_show(struct seq_file *m, void *v)
 {
-	return seq_puts(m, "ssif\n");
+	seq_puts(m, "ssif\n");
+
+	return seq_has_overflowed(m);
 }
 
 static int smi_type_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 1d278cc..e096e9c 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -140,16 +140,6 @@
 {
 	int rc;
 
-	rc = device_add(&chip->dev);
-	if (rc) {
-		dev_err(&chip->dev,
-			"unable to device_register() %s, major %d, minor %d, err=%d\n",
-			chip->devname, MAJOR(chip->dev.devt),
-			MINOR(chip->dev.devt), rc);
-
-		return rc;
-	}
-
 	rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
 	if (rc) {
 		dev_err(&chip->dev,
@@ -161,6 +151,16 @@
 		return rc;
 	}
 
+	rc = device_add(&chip->dev);
+	if (rc) {
+		dev_err(&chip->dev,
+			"unable to device_register() %s, major %d, minor %d, err=%d\n",
+			chip->devname, MAJOR(chip->dev.devt),
+			MINOR(chip->dev.devt), rc);
+
+		return rc;
+	}
+
 	return rc;
 }
 
@@ -174,27 +174,17 @@
  * tpm_chip_register() - create a character device for the TPM chip
  * @chip: TPM chip to use.
  *
- * Creates a character device for the TPM chip and adds sysfs interfaces for
- * the device, PPI and TCPA. As the last step this function adds the
- * chip to the list of TPM chips available for use.
+ * Creates a character device for the TPM chip and adds sysfs attributes for
+ * the device. As the last step this function adds the chip to the list of TPM
+ * chips available for in-kernel use.
  *
- * NOTE: This function should be only called after the chip initialization
- * is complete.
- *
- * Called from tpm_<specific>.c probe function only for devices
- * the driver has determined it should claim.  Prior to calling
- * this function the specific probe function has called pci_enable_device
- * upon errant exit from this function specific probe function should call
- * pci_disable_device
+ * This function should only be called after the chip initialization is
+ * complete.
  */
 int tpm_chip_register(struct tpm_chip *chip)
 {
 	int rc;
 
-	rc = tpm_dev_add_device(chip);
-	if (rc)
-		return rc;
-
 	/* Populate sysfs for TPM1 devices. */
 	if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
 		rc = tpm_sysfs_add_device(chip);
@@ -208,6 +198,10 @@
 		chip->bios_dir = tpm_bios_log_setup(chip->devname);
 	}
 
+	rc = tpm_dev_add_device(chip);
+	if (rc)
+		return rc;
+
 	/* Make the chip available. */
 	spin_lock(&driver_lock);
 	list_add_rcu(&chip->list, &tpm_chip_list);
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index b1e53e3..42ffa5e7 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -124,7 +124,7 @@
 {
 	struct ibmvtpm_dev *ibmvtpm;
 	struct ibmvtpm_crq crq;
-	u64 *word = (u64 *) &crq;
+	__be64 *word = (__be64 *)&crq;
 	int rc;
 
 	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
@@ -145,11 +145,11 @@
 	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
 	crq.valid = (u8)IBMVTPM_VALID_CMD;
 	crq.msg = (u8)VTPM_TPM_COMMAND;
-	crq.len = (u16)count;
-	crq.data = ibmvtpm->rtce_dma_handle;
+	crq.len = cpu_to_be16(count);
+	crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
 
-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
-			      cpu_to_be64(word[1]));
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
+			      be64_to_cpu(word[1]));
 	if (rc != H_SUCCESS) {
 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
 		rc = 0;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index f595f14..6af9289 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -22,9 +22,9 @@
 struct ibmvtpm_crq {
 	u8 valid;
 	u8 msg;
-	u16 len;
-	u32 data;
-	u64 reserved;
+	__be16 len;
+	__be32 data;
+	__be64 reserved;
 } __attribute__((packed, aligned(8)));
 
 struct ibmvtpm_crq_queue {
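
The ibmvtpm change above marks the CRQ fields as big-endian and converts values with cpu_to_be*() before sending them. A userspace sketch of the same idea, using glibc's htobe16()/htobe32() from <endian.h> as stand-ins for the kernel helpers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct ibmvtpm_crq with wire-endian fields. */
struct crq_stub {
	uint8_t  valid;
	uint8_t  msg;
	uint16_t len;		/* __be16 in the driver */
	uint32_t data;		/* __be32 in the driver */
	uint64_t reserved;	/* __be64 in the driver */
} __attribute__((packed, aligned(8)));

int main(void)
{
	struct crq_stub crq = { .valid = 0x80, .msg = 0x20 };	/* example values */

	crq.len  = htobe16(1024);		/* byte count, big-endian on the wire */
	crq.data = htobe32(0x12345678);		/* buffer handle, big-endian on the wire */

	/* On a little-endian host the stored representation is byte-swapped. */
	printf("len stored as 0x%04x, data stored as 0x%08x\n", crq.len, crq.data);
	return 0;
}
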
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fae2dbb..72d7028 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -142,6 +142,7 @@
 	 * notification
 	 */
 	struct work_struct control_work;
+	struct work_struct config_work;
 
 	struct list_head ports;
 
@@ -1837,10 +1838,21 @@
 
 	portdev = vdev->priv;
 
+	if (!use_multiport(portdev))
+		schedule_work(&portdev->config_work);
+}
+
+static void config_work_handler(struct work_struct *work)
+{
+	struct ports_device *portdev;
+
+	portdev = container_of(work, struct ports_device, config_work);
 	if (!use_multiport(portdev)) {
+		struct virtio_device *vdev;
 		struct port *port;
 		u16 rows, cols;
 
+		vdev = portdev->vdev;
 		virtio_cread(vdev, struct virtio_console_config, cols, &cols);
 		virtio_cread(vdev, struct virtio_console_config, rows, &rows);
 
@@ -2040,12 +2052,14 @@
 
 	virtio_device_ready(portdev->vdev);
 
+	INIT_WORK(&portdev->config_work, &config_work_handler);
+	INIT_WORK(&portdev->control_work, &control_work_handler);
+
 	if (multiport) {
 		unsigned int nr_added_bufs;
 
 		spin_lock_init(&portdev->c_ivq_lock);
 		spin_lock_init(&portdev->c_ovq_lock);
-		INIT_WORK(&portdev->control_work, &control_work_handler);
 
 		nr_added_bufs = fill_queue(portdev->c_ivq,
 					   &portdev->c_ivq_lock);
@@ -2113,6 +2127,8 @@
 	/* Finish up work that's lined up */
 	if (use_multiport(portdev))
 		cancel_work_sync(&portdev->control_work);
+	else
+		cancel_work_sync(&portdev->config_work);
 
 	list_for_each_entry_safe(port, port2, &portdev->ports, list)
 		unplug_port(port);
@@ -2164,6 +2180,7 @@
 
 	virtqueue_disable_cb(portdev->c_ivq);
 	cancel_work_sync(&portdev->control_work);
+	cancel_work_sync(&portdev->config_work);
 	/*
 	 * Once more: if control_work_handler() was running, it would
 	 * enable the cb as the last step.
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 91f8613..0b474a0 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -102,12 +102,12 @@
 	  Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
 	  FPGAs. It is commonly used in Analog Devices' reference designs.
 
-config CLK_PPC_CORENET
-	bool "Clock driver for PowerPC corenet platforms"
-	depends on PPC_E500MC && OF
+config CLK_QORIQ
+	bool "Clock driver for Freescale QorIQ platforms"
+	depends on (PPC_E500MC || ARM) && OF
 	---help---
-	  This adds the clock driver support for Freescale PowerPC corenet
-	  platforms using common clock framework.
+	  This adds the clock driver support for Freescale QorIQ platforms
+	  using the common clock framework.
 
 config COMMON_CLK_XGENE
 	bool "Clock driver for APM XGene SoC"
@@ -135,6 +135,14 @@
 	---help---
 	  Support for the Marvell PXA SoC.
 
+config COMMON_CLK_CDCE706
+	tristate "Clock driver for TI CDCE706 clock synthesizer"
+	depends on I2C
+	select REGMAP_I2C
+	select RATIONAL
+	---help---
+	  This driver supports the TI CDCE706 programmable 3-PLL clock synthesizer.
+
 source "drivers/clk/qcom/Kconfig"
 
 endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index d5fba5b..d478ceb 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -16,9 +16,11 @@
 
 # hardware specific clock types
 # please keep this section sorted lexicographically by file/directory path name
+obj-$(CONFIG_MACH_ASM9260)		+= clk-asm9260.o
 obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN)	+= clk-axi-clkgen.o
 obj-$(CONFIG_ARCH_AXXIA)		+= clk-axm5516.o
 obj-$(CONFIG_ARCH_BCM2835)		+= clk-bcm2835.o
+obj-$(CONFIG_COMMON_CLK_CDCE706)	+= clk-cdce706.o
 obj-$(CONFIG_ARCH_CLPS711X)		+= clk-clps711x.o
 obj-$(CONFIG_ARCH_EFM32)		+= clk-efm32gg.o
 obj-$(CONFIG_ARCH_HIGHBANK)		+= clk-highbank.o
@@ -30,7 +32,7 @@
 obj-$(CONFIG_ARCH_NOMADIK)		+= clk-nomadik.o
 obj-$(CONFIG_ARCH_NSPIRE)		+= clk-nspire.o
 obj-$(CONFIG_COMMON_CLK_PALMAS)		+= clk-palmas.o
-obj-$(CONFIG_CLK_PPC_CORENET)		+= clk-ppc-corenet.o
+obj-$(CONFIG_CLK_QORIQ)			+= clk-qoriq.o
 obj-$(CONFIG_COMMON_CLK_RK808)		+= clk-rk808.o
 obj-$(CONFIG_COMMON_CLK_S2MPS11)	+= clk-s2mps11.o
 obj-$(CONFIG_COMMON_CLK_SI5351)		+= clk-si5351.o
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index bbdb1b9..86c8a07 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -56,6 +56,8 @@
 
 static long clk_programmable_determine_rate(struct clk_hw *hw,
 					    unsigned long rate,
+					    unsigned long min_rate,
+					    unsigned long max_rate,
 					    unsigned long *best_parent_rate,
 					    struct clk_hw **best_parent_hw)
 {
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index f07c815..3f27d21 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -89,12 +89,29 @@
 	return 0;
 }
 
+static void pmc_irq_suspend(struct irq_data *d)
+{
+	struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+	pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
+	pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
+}
+
+static void pmc_irq_resume(struct irq_data *d)
+{
+	struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+	pmc_write(pmc, AT91_PMC_IER, pmc->imr);
+}
+
 static struct irq_chip pmc_irq = {
 	.name = "PMC",
 	.irq_disable = pmc_irq_mask,
 	.irq_mask = pmc_irq_mask,
 	.irq_unmask = pmc_irq_unmask,
 	.irq_set_type = pmc_irq_set_type,
+	.irq_suspend = pmc_irq_suspend,
+	.irq_resume = pmc_irq_resume,
 };
 
 static struct lock_class_key pmc_lock_class;
@@ -224,7 +241,8 @@
 		goto out_free_pmc;
 
 	pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
-	if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc))
+	if (request_irq(pmc->virq, pmc_irq_handler,
+			IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
 		goto out_remove_irqdomain;
 
 	return pmc;
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 52d2041..69abb08 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -33,6 +33,7 @@
 	spinlock_t lock;
 	const struct at91_pmc_caps *caps;
 	struct irq_domain *irqdomain;
+	u32 imr;
 };
 
 static inline void pmc_lock(struct at91_pmc *pmc)
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 1c06f6f..05abae8 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -1032,6 +1032,8 @@
 }
 
 static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long min_rate,
+		unsigned long max_rate,
 		unsigned long *best_parent_rate, struct clk_hw **best_parent)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
new file mode 100644
index 0000000..88f4ff6
--- /dev/null
+++ b/drivers/clk/clk-asm9260.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2014 Oleksij Rempel <linux@rempel-privat.de>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/alphascale,asm9260.h>
+
+#define HW_AHBCLKCTRL0		0x0020
+#define HW_AHBCLKCTRL1		0x0030
+#define HW_SYSPLLCTRL		0x0100
+#define HW_MAINCLKSEL		0x0120
+#define HW_MAINCLKUEN		0x0124
+#define HW_UARTCLKSEL		0x0128
+#define HW_UARTCLKUEN		0x012c
+#define HW_I2S0CLKSEL		0x0130
+#define HW_I2S0CLKUEN		0x0134
+#define HW_I2S1CLKSEL		0x0138
+#define HW_I2S1CLKUEN		0x013c
+#define HW_WDTCLKSEL		0x0160
+#define HW_WDTCLKUEN		0x0164
+#define HW_CLKOUTCLKSEL		0x0170
+#define HW_CLKOUTCLKUEN		0x0174
+#define HW_CPUCLKDIV		0x017c
+#define HW_SYSAHBCLKDIV		0x0180
+#define HW_I2S0MCLKDIV		0x0190
+#define HW_I2S0SCLKDIV		0x0194
+#define HW_I2S1MCLKDIV		0x0188
+#define HW_I2S1SCLKDIV		0x018c
+#define HW_UART0CLKDIV		0x0198
+#define HW_UART1CLKDIV		0x019c
+#define HW_UART2CLKDIV		0x01a0
+#define HW_UART3CLKDIV		0x01a4
+#define HW_UART4CLKDIV		0x01a8
+#define HW_UART5CLKDIV		0x01ac
+#define HW_UART6CLKDIV		0x01b0
+#define HW_UART7CLKDIV		0x01b4
+#define HW_UART8CLKDIV		0x01b8
+#define HW_UART9CLKDIV		0x01bc
+#define HW_SPI0CLKDIV		0x01c0
+#define HW_SPI1CLKDIV		0x01c4
+#define HW_QUADSPICLKDIV	0x01c8
+#define HW_SSP0CLKDIV		0x01d0
+#define HW_NANDCLKDIV		0x01d4
+#define HW_TRACECLKDIV		0x01e0
+#define HW_CAMMCLKDIV		0x01e8
+#define HW_WDTCLKDIV		0x01ec
+#define HW_CLKOUTCLKDIV		0x01f4
+#define HW_MACCLKDIV		0x01f8
+#define HW_LCDCLKDIV		0x01fc
+#define HW_ADCANACLKDIV		0x0200
+
+static struct clk *clks[MAX_CLKS];
+static struct clk_onecell_data clk_data;
+static DEFINE_SPINLOCK(asm9260_clk_lock);
+
+struct asm9260_div_clk {
+	unsigned int idx;
+	const char *name;
+	const char *parent_name;
+	u32 reg;
+};
+
+struct asm9260_gate_data {
+	unsigned int idx;
+	const char *name;
+	const char *parent_name;
+	u32 reg;
+	u8 bit_idx;
+	unsigned long flags;
+};
+
+struct asm9260_mux_clock {
+	u8			mask;
+	u32			*table;
+	const char		*name;
+	const char		**parent_names;
+	u8			num_parents;
+	unsigned long		offset;
+	unsigned long		flags;
+};
+
+static void __iomem *base;
+
+static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
+	{ CLKID_SYS_CPU,	"cpu_div", "main_gate", HW_CPUCLKDIV },
+	{ CLKID_SYS_AHB,	"ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
+
+	/* i2s has two dividers: one for the external mclk only and an
+	 * internal divider for all clocks. */
+	{ CLKID_SYS_I2S0M,	"i2s0m_div", "i2s0_mclk",  HW_I2S0MCLKDIV },
+	{ CLKID_SYS_I2S1M,	"i2s1m_div", "i2s1_mclk",  HW_I2S1MCLKDIV },
+	{ CLKID_SYS_I2S0S,	"i2s0s_div", "i2s0_gate",  HW_I2S0SCLKDIV },
+	{ CLKID_SYS_I2S1S,	"i2s1s_div", "i2s0_gate",  HW_I2S1SCLKDIV },
+
+	{ CLKID_SYS_UART0,	"uart0_div", "uart_gate", HW_UART0CLKDIV },
+	{ CLKID_SYS_UART1,	"uart1_div", "uart_gate", HW_UART1CLKDIV },
+	{ CLKID_SYS_UART2,	"uart2_div", "uart_gate", HW_UART2CLKDIV },
+	{ CLKID_SYS_UART3,	"uart3_div", "uart_gate", HW_UART3CLKDIV },
+	{ CLKID_SYS_UART4,	"uart4_div", "uart_gate", HW_UART4CLKDIV },
+	{ CLKID_SYS_UART5,	"uart5_div", "uart_gate", HW_UART5CLKDIV },
+	{ CLKID_SYS_UART6,	"uart6_div", "uart_gate", HW_UART6CLKDIV },
+	{ CLKID_SYS_UART7,	"uart7_div", "uart_gate", HW_UART7CLKDIV },
+	{ CLKID_SYS_UART8,	"uart8_div", "uart_gate", HW_UART8CLKDIV },
+	{ CLKID_SYS_UART9,	"uart9_div", "uart_gate", HW_UART9CLKDIV },
+
+	{ CLKID_SYS_SPI0,	"spi0_div",	"main_gate", HW_SPI0CLKDIV },
+	{ CLKID_SYS_SPI1,	"spi1_div",	"main_gate", HW_SPI1CLKDIV },
+	{ CLKID_SYS_QUADSPI,	"quadspi_div",	"main_gate", HW_QUADSPICLKDIV },
+	{ CLKID_SYS_SSP0,	"ssp0_div",	"main_gate", HW_SSP0CLKDIV },
+	{ CLKID_SYS_NAND,	"nand_div",	"main_gate", HW_NANDCLKDIV },
+	{ CLKID_SYS_TRACE,	"trace_div",	"main_gate", HW_TRACECLKDIV },
+	{ CLKID_SYS_CAMM,	"camm_div",	"main_gate", HW_CAMMCLKDIV },
+	{ CLKID_SYS_MAC,	"mac_div",	"main_gate", HW_MACCLKDIV },
+	{ CLKID_SYS_LCD,	"lcd_div",	"main_gate", HW_LCDCLKDIV },
+	{ CLKID_SYS_ADCANA,	"adcana_div",	"main_gate", HW_ADCANACLKDIV },
+
+	{ CLKID_SYS_WDT,	"wdt_div",	"wdt_gate",    HW_WDTCLKDIV },
+	{ CLKID_SYS_CLKOUT,	"clkout_div",	"clkout_gate", HW_CLKOUTCLKDIV },
+};
+
+static const struct asm9260_gate_data asm9260_mux_gates[] __initconst = {
+	{ 0, "main_gate",	"main_mux",	HW_MAINCLKUEN,	0 },
+	{ 0, "uart_gate",	"uart_mux",	HW_UARTCLKUEN,	0 },
+	{ 0, "i2s0_gate",	"i2s0_mux",	HW_I2S0CLKUEN,	0 },
+	{ 0, "i2s1_gate",	"i2s1_mux",	HW_I2S1CLKUEN,	0 },
+	{ 0, "wdt_gate",	"wdt_mux",	HW_WDTCLKUEN,	0 },
+	{ 0, "clkout_gate",	"clkout_mux",	HW_CLKOUTCLKUEN, 0 },
+};
+static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = {
+	/* ahb gates */
+	{ CLKID_AHB_ROM,	"rom",		"ahb_div",
+		HW_AHBCLKCTRL0,	1, CLK_IGNORE_UNUSED},
+	{ CLKID_AHB_RAM,	"ram",		"ahb_div",
+		HW_AHBCLKCTRL0,	2, CLK_IGNORE_UNUSED},
+	{ CLKID_AHB_GPIO,	"gpio",		"ahb_div",
+		HW_AHBCLKCTRL0,	4 },
+	{ CLKID_AHB_MAC,	"mac",		"ahb_div",
+		HW_AHBCLKCTRL0,	5 },
+	{ CLKID_AHB_EMI,	"emi",		"ahb_div",
+		HW_AHBCLKCTRL0,	6, CLK_IGNORE_UNUSED},
+	{ CLKID_AHB_USB0,	"usb0",		"ahb_div",
+		HW_AHBCLKCTRL0,	7 },
+	{ CLKID_AHB_USB1,	"usb1",		"ahb_div",
+		HW_AHBCLKCTRL0,	8 },
+	{ CLKID_AHB_DMA0,	"dma0",		"ahb_div",
+		HW_AHBCLKCTRL0,	9 },
+	{ CLKID_AHB_DMA1,	"dma1",		"ahb_div",
+		HW_AHBCLKCTRL0,	10 },
+	{ CLKID_AHB_UART0,	"uart0",	"ahb_div",
+		HW_AHBCLKCTRL0,	11 },
+	{ CLKID_AHB_UART1,	"uart1",	"ahb_div",
+		HW_AHBCLKCTRL0,	12 },
+	{ CLKID_AHB_UART2,	"uart2",	"ahb_div",
+		HW_AHBCLKCTRL0,	13 },
+	{ CLKID_AHB_UART3,	"uart3",	"ahb_div",
+		HW_AHBCLKCTRL0,	14 },
+	{ CLKID_AHB_UART4,	"uart4",	"ahb_div",
+		HW_AHBCLKCTRL0,	15 },
+	{ CLKID_AHB_UART5,	"uart5",	"ahb_div",
+		HW_AHBCLKCTRL0,	16 },
+	{ CLKID_AHB_UART6,	"uart6",	"ahb_div",
+		HW_AHBCLKCTRL0,	17 },
+	{ CLKID_AHB_UART7,	"uart7",	"ahb_div",
+		HW_AHBCLKCTRL0,	18 },
+	{ CLKID_AHB_UART8,	"uart8",	"ahb_div",
+		HW_AHBCLKCTRL0,	19 },
+	{ CLKID_AHB_UART9,	"uart9",	"ahb_div",
+		HW_AHBCLKCTRL0,	20 },
+	{ CLKID_AHB_I2S0,	"i2s0",		"ahb_div",
+		HW_AHBCLKCTRL0,	21 },
+	{ CLKID_AHB_I2C0,	"i2c0",		"ahb_div",
+		HW_AHBCLKCTRL0,	22 },
+	{ CLKID_AHB_I2C1,	"i2c1",		"ahb_div",
+		HW_AHBCLKCTRL0,	23 },
+	{ CLKID_AHB_SSP0,	"ssp0",		"ahb_div",
+		HW_AHBCLKCTRL0,	24 },
+	{ CLKID_AHB_IOCONFIG,	"ioconf",	"ahb_div",
+		HW_AHBCLKCTRL0,	25 },
+	{ CLKID_AHB_WDT,	"wdt",		"ahb_div",
+		HW_AHBCLKCTRL0,	26 },
+	{ CLKID_AHB_CAN0,	"can0",		"ahb_div",
+		HW_AHBCLKCTRL0,	27 },
+	{ CLKID_AHB_CAN1,	"can1",		"ahb_div",
+		HW_AHBCLKCTRL0,	28 },
+	{ CLKID_AHB_MPWM,	"mpwm",		"ahb_div",
+		HW_AHBCLKCTRL0,	29 },
+	{ CLKID_AHB_SPI0,	"spi0",		"ahb_div",
+		HW_AHBCLKCTRL0,	30 },
+	{ CLKID_AHB_SPI1,	"spi1",		"ahb_div",
+		HW_AHBCLKCTRL0,	31 },
+
+	{ CLKID_AHB_QEI,	"qei",		"ahb_div",
+		HW_AHBCLKCTRL1,	0 },
+	{ CLKID_AHB_QUADSPI0,	"quadspi0",	"ahb_div",
+		HW_AHBCLKCTRL1,	1 },
+	{ CLKID_AHB_CAMIF,	"capmif",	"ahb_div",
+		HW_AHBCLKCTRL1,	2 },
+	{ CLKID_AHB_LCDIF,	"lcdif",	"ahb_div",
+		HW_AHBCLKCTRL1,	3 },
+	{ CLKID_AHB_TIMER0,	"timer0",	"ahb_div",
+		HW_AHBCLKCTRL1,	4 },
+	{ CLKID_AHB_TIMER1,	"timer1",	"ahb_div",
+		HW_AHBCLKCTRL1,	5 },
+	{ CLKID_AHB_TIMER2,	"timer2",	"ahb_div",
+		HW_AHBCLKCTRL1,	6 },
+	{ CLKID_AHB_TIMER3,	"timer3",	"ahb_div",
+		HW_AHBCLKCTRL1,	7 },
+	{ CLKID_AHB_IRQ,	"irq",		"ahb_div",
+		HW_AHBCLKCTRL1,	8, CLK_IGNORE_UNUSED},
+	{ CLKID_AHB_RTC,	"rtc",		"ahb_div",
+		HW_AHBCLKCTRL1,	9 },
+	{ CLKID_AHB_NAND,	"nand",		"ahb_div",
+		HW_AHBCLKCTRL1,	10 },
+	{ CLKID_AHB_ADC0,	"adc0",		"ahb_div",
+		HW_AHBCLKCTRL1,	11 },
+	{ CLKID_AHB_LED,	"led",		"ahb_div",
+		HW_AHBCLKCTRL1,	12 },
+	{ CLKID_AHB_DAC0,	"dac0",		"ahb_div",
+		HW_AHBCLKCTRL1,	13 },
+	{ CLKID_AHB_LCD,	"lcd",		"ahb_div",
+		HW_AHBCLKCTRL1,	14 },
+	{ CLKID_AHB_I2S1,	"i2s1",		"ahb_div",
+		HW_AHBCLKCTRL1,	15 },
+	{ CLKID_AHB_MAC1,	"mac1",		"ahb_div",
+		HW_AHBCLKCTRL1,	16 },
+};
+
+static const char __initdata *main_mux_p[] =   { NULL, NULL };
+static const char __initdata *i2s0_mux_p[] =   { NULL, NULL, "i2s0m_div"};
+static const char __initdata *i2s1_mux_p[] =   { NULL, NULL, "i2s1m_div"};
+static const char __initdata *clkout_mux_p[] = { NULL, NULL, "rtc"};
+static u32 three_mux_table[] = {0, 1, 3};
+
+static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
+	{ 1, three_mux_table, "main_mux",	main_mux_p,
+		ARRAY_SIZE(main_mux_p), HW_MAINCLKSEL, },
+	{ 1, three_mux_table, "uart_mux",	main_mux_p,
+		ARRAY_SIZE(main_mux_p), HW_UARTCLKSEL, },
+	{ 1, three_mux_table, "wdt_mux",	main_mux_p,
+		ARRAY_SIZE(main_mux_p), HW_WDTCLKSEL, },
+	{ 3, three_mux_table, "i2s0_mux",	i2s0_mux_p,
+		ARRAY_SIZE(i2s0_mux_p), HW_I2S0CLKSEL, },
+	{ 3, three_mux_table, "i2s1_mux",	i2s1_mux_p,
+		ARRAY_SIZE(i2s1_mux_p), HW_I2S1CLKSEL, },
+	{ 3, three_mux_table, "clkout_mux",	clkout_mux_p,
+		ARRAY_SIZE(clkout_mux_p), HW_CLKOUTCLKSEL, },
+};
+
+static void __init asm9260_acc_init(struct device_node *np)
+{
+	struct clk *clk;
+	const char *ref_clk, *pll_clk = "pll";
+	u32 rate;
+	int n;
+	u32 accuracy = 0;
+
+	base = of_io_request_and_map(np, 0, np->name);
+	if (!base)
+		panic("%s: unable to map resource", np->name);
+
+	/* register pll */
+	rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
+
+	ref_clk = of_clk_get_parent_name(np, 0);
+	accuracy = clk_get_accuracy(__clk_lookup(ref_clk));
+	clk = clk_register_fixed_rate_with_accuracy(NULL, pll_clk,
+			ref_clk, 0, rate, accuracy);
+
+	if (IS_ERR(clk))
+		panic("%s: can't register REFCLK. Check DT!", np->name);
+
+	for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
+		const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
+
+		mc->parent_names[0] = ref_clk;
+		mc->parent_names[1] = pll_clk;
+		clk = clk_register_mux_table(NULL, mc->name, mc->parent_names,
+				mc->num_parents, mc->flags, base + mc->offset,
+				0, mc->mask, 0, mc->table, &asm9260_clk_lock);
+	}
+
+	/* clock mux gate cells */
+	for (n = 0; n < ARRAY_SIZE(asm9260_mux_gates); n++) {
+		const struct asm9260_gate_data *gd = &asm9260_mux_gates[n];
+
+		clk = clk_register_gate(NULL, gd->name,
+			gd->parent_name, gd->flags | CLK_SET_RATE_PARENT,
+			base + gd->reg, gd->bit_idx, 0, &asm9260_clk_lock);
+	}
+
+	/* clock div cells */
+	for (n = 0; n < ARRAY_SIZE(asm9260_div_clks); n++) {
+		const struct asm9260_div_clk *dc = &asm9260_div_clks[n];
+
+		clks[dc->idx] = clk_register_divider(NULL, dc->name,
+				dc->parent_name, CLK_SET_RATE_PARENT,
+				base + dc->reg, 0, 8, CLK_DIVIDER_ONE_BASED,
+				&asm9260_clk_lock);
+	}
+
+	/* clock ahb gate cells */
+	for (n = 0; n < ARRAY_SIZE(asm9260_ahb_gates); n++) {
+		const struct asm9260_gate_data *gd = &asm9260_ahb_gates[n];
+
+		clks[gd->idx] = clk_register_gate(NULL, gd->name,
+				gd->parent_name, gd->flags, base + gd->reg,
+				gd->bit_idx, 0, &asm9260_clk_lock);
+	}
+
+	/* check for errors on leaf clocks */
+	for (n = 0; n < MAX_CLKS; n++) {
+		if (!IS_ERR(clks[n]))
+			continue;
+
+		pr_err("%s: Unable to register leaf clock %d\n",
+				np->full_name, n);
+		goto fail;
+	}
+
+	/* register clk-provider */
+	clk_data.clks = clks;
+	clk_data.clk_num = MAX_CLKS;
+	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+	return;
+fail:
+	iounmap(base);
+}
+CLK_OF_DECLARE(asm9260_acc, "alphascale,asm9260-clock-controller",
+		asm9260_acc_init);
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
new file mode 100644
index 0000000..c386ad2
--- /dev/null
+++ b/drivers/clk/clk-cdce706.c
@@ -0,0 +1,700 @@
+/*
+ * TI CDCE706 programmable 3-PLL clock synthesizer driver
+ *
+ * Copyright (c) 2014 Cadence Design Systems Inc.
+ *
+ * Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rational.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define CDCE706_CLKIN_CLOCK		10
+#define CDCE706_CLKIN_SOURCE		11
+#define CDCE706_PLL_M_LOW(pll)		(1 + 3 * (pll))
+#define CDCE706_PLL_N_LOW(pll)		(2 + 3 * (pll))
+#define CDCE706_PLL_HI(pll)		(3 + 3 * (pll))
+#define CDCE706_PLL_MUX			3
+#define CDCE706_PLL_FVCO		6
+#define CDCE706_DIVIDER(div)		(13 + (div))
+#define CDCE706_CLKOUT(out)		(19 + (out))
+
+#define CDCE706_CLKIN_CLOCK_MASK	0x10
+#define CDCE706_CLKIN_SOURCE_SHIFT	6
+#define CDCE706_CLKIN_SOURCE_MASK	0xc0
+#define CDCE706_CLKIN_SOURCE_LVCMOS	0x40
+
+#define CDCE706_PLL_MUX_MASK(pll)	(0x80 >> (pll))
+#define CDCE706_PLL_LOW_M_MASK		0xff
+#define CDCE706_PLL_LOW_N_MASK		0xff
+#define CDCE706_PLL_HI_M_MASK		0x1
+#define CDCE706_PLL_HI_N_MASK		0x1e
+#define CDCE706_PLL_HI_N_SHIFT		1
+#define CDCE706_PLL_M_MAX		0x1ff
+#define CDCE706_PLL_N_MAX		0xfff
+#define CDCE706_PLL_FVCO_MASK(pll)	(0x80 >> (pll))
+#define CDCE706_PLL_FREQ_MIN		 80000000
+#define CDCE706_PLL_FREQ_MAX		300000000
+#define CDCE706_PLL_FREQ_HI		180000000
+
+#define CDCE706_DIVIDER_PLL(div)	(9 + (div) - ((div) > 2) - ((div) > 4))
+#define CDCE706_DIVIDER_PLL_SHIFT(div)	((div) < 2 ? 5 : 3 * ((div) & 1))
+#define CDCE706_DIVIDER_PLL_MASK(div)	(0x7 << CDCE706_DIVIDER_PLL_SHIFT(div))
+#define CDCE706_DIVIDER_DIVIDER_MASK	0x7f
+#define CDCE706_DIVIDER_DIVIDER_MAX	0x7f
+
+#define CDCE706_CLKOUT_DIVIDER_MASK	0x7
+#define CDCE706_CLKOUT_ENABLE_MASK	0x8
+
+static struct regmap_config cdce706_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.val_format_endian = REGMAP_ENDIAN_NATIVE,
+};
+
+#define to_hw_data(phw) (container_of((phw), struct cdce706_hw_data, hw))
+
+struct cdce706_hw_data {
+	struct cdce706_dev_data *dev_data;
+	unsigned idx;
+	unsigned parent;
+	struct clk *clk;
+	struct clk_hw hw;
+	unsigned div;
+	unsigned mul;
+	unsigned mux;
+};
+
+struct cdce706_dev_data {
+	struct i2c_client *client;
+	struct regmap *regmap;
+	struct clk_onecell_data onecell;
+	struct clk *clks[6];
+	struct clk *clkin_clk[2];
+	const char *clkin_name[2];
+	struct cdce706_hw_data clkin[1];
+	struct cdce706_hw_data pll[3];
+	struct cdce706_hw_data divider[6];
+	struct cdce706_hw_data clkout[6];
+};
+
+static const char * const cdce706_source_name[] = {
+	"clk_in0", "clk_in1",
+};
+
+static const char *cdce706_clkin_name[] = {
+	"clk_in",
+};
+
+static const char * const cdce706_pll_name[] = {
+	"pll1", "pll2", "pll3",
+};
+
+static const char *cdce706_divider_parent_name[] = {
+	"clk_in", "pll1", "pll2", "pll2", "pll3",
+};
+
+static const char *cdce706_divider_name[] = {
+	"p0", "p1", "p2", "p3", "p4", "p5",
+};
+
+static const char * const cdce706_clkout_name[] = {
+	"clk_out0", "clk_out1", "clk_out2", "clk_out3", "clk_out4", "clk_out5",
+};
+
+static int cdce706_reg_read(struct cdce706_dev_data *dev_data, unsigned reg,
+			    unsigned *val)
+{
+	int rc = regmap_read(dev_data->regmap, reg | 0x80, val);
+
+	if (rc < 0)
+		dev_err(&dev_data->client->dev, "error reading reg %u", reg);
+	return rc;
+}
+
+static int cdce706_reg_write(struct cdce706_dev_data *dev_data, unsigned reg,
+			     unsigned val)
+{
+	int rc = regmap_write(dev_data->regmap, reg | 0x80, val);
+
+	if (rc < 0)
+		dev_err(&dev_data->client->dev, "error writing reg %u", reg);
+	return rc;
+}
+
+static int cdce706_reg_update(struct cdce706_dev_data *dev_data, unsigned reg,
+			      unsigned mask, unsigned val)
+{
+	int rc = regmap_update_bits(dev_data->regmap, reg | 0x80, mask, val);
+
+	if (rc < 0)
+		dev_err(&dev_data->client->dev, "error updating reg %u", reg);
+	return rc;
+}
+
+static int cdce706_clkin_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	hwd->parent = index;
+	return 0;
+}
+
+static u8 cdce706_clkin_get_parent(struct clk_hw *hw)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	return hwd->parent;
+}
+
+static const struct clk_ops cdce706_clkin_ops = {
+	.set_parent = cdce706_clkin_set_parent,
+	.get_parent = cdce706_clkin_get_parent,
+};
+
+static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
+					     unsigned long parent_rate)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	dev_dbg(&hwd->dev_data->client->dev,
+		"%s, pll: %d, mux: %d, mul: %u, div: %u\n",
+		__func__, hwd->idx, hwd->mux, hwd->mul, hwd->div);
+
+	if (!hwd->mux) {
+		if (hwd->div && hwd->mul) {
+			u64 res = (u64)parent_rate * hwd->mul;
+
+			do_div(res, hwd->div);
+			return res;
+		}
+	} else {
+		if (hwd->div)
+			return parent_rate / hwd->div;
+	}
+	return 0;
+}
+
+static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long *parent_rate)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+	unsigned long mul, div;
+	u64 res;
+
+	dev_dbg(&hwd->dev_data->client->dev,
+		"%s, rate: %lu, parent_rate: %lu\n",
+		__func__, rate, *parent_rate);
+
+	rational_best_approximation(rate, *parent_rate,
+				    CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
+				    &mul, &div);
+	hwd->mul = mul;
+	hwd->div = div;
+
+	dev_dbg(&hwd->dev_data->client->dev,
+		"%s, pll: %d, mul: %lu, div: %lu\n",
+		__func__, hwd->idx, mul, div);
+
+	res = (u64)*parent_rate * hwd->mul;
+	do_div(res, hwd->div);
+	return res;
+}
+
+static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+	unsigned long mul = hwd->mul, div = hwd->div;
+	int err;
+
+	dev_dbg(&hwd->dev_data->client->dev,
+		"%s, pll: %d, mul: %lu, div: %lu\n",
+		__func__, hwd->idx, mul, div);
+
+	err = cdce706_reg_update(hwd->dev_data,
+				 CDCE706_PLL_HI(hwd->idx),
+				 CDCE706_PLL_HI_M_MASK | CDCE706_PLL_HI_N_MASK,
+				 ((div >> 8) & CDCE706_PLL_HI_M_MASK) |
+				 ((mul >> (8 - CDCE706_PLL_HI_N_SHIFT)) &
+				  CDCE706_PLL_HI_N_MASK));
+	if (err < 0)
+		return err;
+
+	err = cdce706_reg_write(hwd->dev_data,
+				CDCE706_PLL_M_LOW(hwd->idx),
+				div & CDCE706_PLL_LOW_M_MASK);
+	if (err < 0)
+		return err;
+
+	err = cdce706_reg_write(hwd->dev_data,
+				CDCE706_PLL_N_LOW(hwd->idx),
+				mul & CDCE706_PLL_LOW_N_MASK);
+	if (err < 0)
+		return err;
+
+	err = cdce706_reg_update(hwd->dev_data,
+				 CDCE706_PLL_FVCO,
+				 CDCE706_PLL_FVCO_MASK(hwd->idx),
+				 rate > CDCE706_PLL_FREQ_HI ?
+				 CDCE706_PLL_FVCO_MASK(hwd->idx) : 0);
+	return err;
+}
+
+static const struct clk_ops cdce706_pll_ops = {
+	.recalc_rate = cdce706_pll_recalc_rate,
+	.round_rate = cdce706_pll_round_rate,
+	.set_rate = cdce706_pll_set_rate,
+};
+
+static int cdce706_divider_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	if (hwd->parent == index)
+		return 0;
+	hwd->parent = index;
+	return cdce706_reg_update(hwd->dev_data,
+				  CDCE706_DIVIDER_PLL(hwd->idx),
+				  CDCE706_DIVIDER_PLL_MASK(hwd->idx),
+				  index << CDCE706_DIVIDER_PLL_SHIFT(hwd->idx));
+}
+
+static u8 cdce706_divider_get_parent(struct clk_hw *hw)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	return hwd->parent;
+}
+
+static unsigned long cdce706_divider_recalc_rate(struct clk_hw *hw,
+						 unsigned long parent_rate)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	dev_dbg(&hwd->dev_data->client->dev,
+		"%s, divider: %d, div: %u\n",
+		__func__, hwd->idx, hwd->div);
+	if (hwd->div)
+		return parent_rate / hwd->div;
+	return 0;
+}
+
+static long cdce706_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+				       unsigned long *parent_rate)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+	struct cdce706_dev_data *cdce = hwd->dev_data;
+	unsigned long mul, div;
+
+	dev_dbg(&hwd->dev_data->client->dev,
+		"%s, rate: %lu, parent_rate: %lu\n",
+		__func__, rate, *parent_rate);
+
+	rational_best_approximation(rate, *parent_rate,
+				    1, CDCE706_DIVIDER_DIVIDER_MAX,
+				    &mul, &div);
+	if (!mul)
+		div = CDCE706_DIVIDER_DIVIDER_MAX;
+
+	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+		unsigned long best_diff = rate;
+		unsigned long best_div = 0;
+		struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent];
+		unsigned long gp_rate = gp_clk ? clk_get_rate(gp_clk) : 0;
+
+		for (div = CDCE706_PLL_FREQ_MIN / rate; best_diff &&
+		     div <= CDCE706_PLL_FREQ_MAX / rate; ++div) {
+			unsigned long n, m;
+			unsigned long diff;
+			unsigned long div_rate;
+			u64 div_rate64;
+
+			if (rate * div < CDCE706_PLL_FREQ_MIN)
+				continue;
+
+			rational_best_approximation(rate * div, gp_rate,
+						    CDCE706_PLL_N_MAX,
+						    CDCE706_PLL_M_MAX,
+						    &n, &m);
+			div_rate64 = (u64)gp_rate * n;
+			do_div(div_rate64, m);
+			do_div(div_rate64, div);
+			div_rate = div_rate64;
+			diff = max(div_rate, rate) - min(div_rate, rate);
+
+			if (diff < best_diff) {
+				best_diff = diff;
+				best_div = div;
+				dev_dbg(&hwd->dev_data->client->dev,
+					"%s, %lu * %lu / %lu / %lu = %lu\n",
+					__func__, gp_rate, n, m, div, div_rate);
+			}
+		}
+
+		div = best_div;
+
+		dev_dbg(&hwd->dev_data->client->dev,
+			"%s, altering parent rate: %lu -> %lu\n",
+			__func__, *parent_rate, rate * div);
+		*parent_rate = rate * div;
+	}
+	hwd->div = div;
+
+	dev_dbg(&hwd->dev_data->client->dev,
+		"%s, divider: %d, div: %lu\n",
+		__func__, hwd->idx, div);
+
+	return *parent_rate / div;
+}
+
+static int cdce706_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	dev_dbg(&hwd->dev_data->client->dev,
+		"%s, divider: %d, div: %u\n",
+		__func__, hwd->idx, hwd->div);
+
+	return cdce706_reg_update(hwd->dev_data,
+				  CDCE706_DIVIDER(hwd->idx),
+				  CDCE706_DIVIDER_DIVIDER_MASK,
+				  hwd->div);
+}
+
+static const struct clk_ops cdce706_divider_ops = {
+	.set_parent = cdce706_divider_set_parent,
+	.get_parent = cdce706_divider_get_parent,
+	.recalc_rate = cdce706_divider_recalc_rate,
+	.round_rate = cdce706_divider_round_rate,
+	.set_rate = cdce706_divider_set_rate,
+};
+
+static int cdce706_clkout_prepare(struct clk_hw *hw)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	return cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
+				  CDCE706_CLKOUT_ENABLE_MASK,
+				  CDCE706_CLKOUT_ENABLE_MASK);
+}
+
+static void cdce706_clkout_unprepare(struct clk_hw *hw)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
+			   CDCE706_CLKOUT_ENABLE_MASK, 0);
+}
+
+static int cdce706_clkout_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	if (hwd->parent == index)
+		return 0;
+	hwd->parent = index;
+	return cdce706_reg_update(hwd->dev_data,
+				  CDCE706_CLKOUT(hwd->idx),
+				  CDCE706_CLKOUT_ENABLE_MASK, index);
+}
+
+static u8 cdce706_clkout_get_parent(struct clk_hw *hw)
+{
+	struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+	return hwd->parent;
+}
+
+static unsigned long cdce706_clkout_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	return parent_rate;
+}
+
+static long cdce706_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+				      unsigned long *parent_rate)
+{
+	*parent_rate = rate;
+	return rate;
+}
+
+static int cdce706_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long parent_rate)
+{
+	return 0;
+}
+
+static const struct clk_ops cdce706_clkout_ops = {
+	.prepare = cdce706_clkout_prepare,
+	.unprepare = cdce706_clkout_unprepare,
+	.set_parent = cdce706_clkout_set_parent,
+	.get_parent = cdce706_clkout_get_parent,
+	.recalc_rate = cdce706_clkout_recalc_rate,
+	.round_rate = cdce706_clkout_round_rate,
+	.set_rate = cdce706_clkout_set_rate,
+};
+
+static int cdce706_register_hw(struct cdce706_dev_data *cdce,
+			       struct cdce706_hw_data *hw, unsigned num_hw,
+			       const char * const *clk_names,
+			       struct clk_init_data *init)
+{
+	unsigned i;
+
+	for (i = 0; i < num_hw; ++i, ++hw) {
+		init->name = clk_names[i];
+		hw->dev_data = cdce;
+		hw->idx = i;
+		hw->hw.init = init;
+		hw->clk = devm_clk_register(&cdce->client->dev,
+					    &hw->hw);
+		if (IS_ERR(hw->clk)) {
+			dev_err(&cdce->client->dev, "Failed to register %s\n",
+				clk_names[i]);
+			return PTR_ERR(hw->clk);
+		}
+	}
+	return 0;
+}
+
+static int cdce706_register_clkin(struct cdce706_dev_data *cdce)
+{
+	struct clk_init_data init = {
+		.ops = &cdce706_clkin_ops,
+		.parent_names = cdce->clkin_name,
+		.num_parents = ARRAY_SIZE(cdce->clkin_name),
+	};
+	unsigned i;
+	int ret;
+	unsigned clock, source;
+
+	for (i = 0; i < ARRAY_SIZE(cdce->clkin_name); ++i) {
+		struct clk *parent = devm_clk_get(&cdce->client->dev,
+						  cdce706_source_name[i]);
+
+		if (IS_ERR(parent)) {
+			cdce->clkin_name[i] = cdce706_source_name[i];
+		} else {
+			cdce->clkin_name[i] = __clk_get_name(parent);
+			cdce->clkin_clk[i] = parent;
+		}
+	}
+
+	ret = cdce706_reg_read(cdce, CDCE706_CLKIN_SOURCE, &source);
+	if (ret < 0)
+		return ret;
+	if ((source & CDCE706_CLKIN_SOURCE_MASK) ==
+	    CDCE706_CLKIN_SOURCE_LVCMOS) {
+		ret = cdce706_reg_read(cdce, CDCE706_CLKIN_CLOCK, &clock);
+		if (ret < 0)
+			return ret;
+		cdce->clkin[0].parent = !!(clock & CDCE706_CLKIN_CLOCK_MASK);
+	}
+
+	ret = cdce706_register_hw(cdce, cdce->clkin,
+				  ARRAY_SIZE(cdce->clkin),
+				  cdce706_clkin_name, &init);
+	return ret;
+}
+
+static int cdce706_register_plls(struct cdce706_dev_data *cdce)
+{
+	struct clk_init_data init = {
+		.ops = &cdce706_pll_ops,
+		.parent_names = cdce706_clkin_name,
+		.num_parents = ARRAY_SIZE(cdce706_clkin_name),
+	};
+	unsigned i;
+	int ret;
+	unsigned mux;
+
+	ret = cdce706_reg_read(cdce, CDCE706_PLL_MUX, &mux);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(cdce->pll); ++i) {
+		unsigned m, n, v;
+
+		ret = cdce706_reg_read(cdce, CDCE706_PLL_M_LOW(i), &m);
+		if (ret < 0)
+			return ret;
+		ret = cdce706_reg_read(cdce, CDCE706_PLL_N_LOW(i), &n);
+		if (ret < 0)
+			return ret;
+		ret = cdce706_reg_read(cdce, CDCE706_PLL_HI(i), &v);
+		if (ret < 0)
+			return ret;
+		cdce->pll[i].div = m | ((v & CDCE706_PLL_HI_M_MASK) << 8);
+		cdce->pll[i].mul = n | ((v & CDCE706_PLL_HI_N_MASK) <<
+					(8 - CDCE706_PLL_HI_N_SHIFT));
+		cdce->pll[i].mux = mux & CDCE706_PLL_MUX_MASK(i);
+		dev_dbg(&cdce->client->dev,
+			"%s: i: %u, div: %u, mul: %u, mux: %d\n", __func__, i,
+			cdce->pll[i].div, cdce->pll[i].mul, cdce->pll[i].mux);
+	}
+
+	ret = cdce706_register_hw(cdce, cdce->pll,
+				  ARRAY_SIZE(cdce->pll),
+				  cdce706_pll_name, &init);
+	return ret;
+}
+
+static int cdce706_register_dividers(struct cdce706_dev_data *cdce)
+{
+	struct clk_init_data init = {
+		.ops = &cdce706_divider_ops,
+		.parent_names = cdce706_divider_parent_name,
+		.num_parents = ARRAY_SIZE(cdce706_divider_parent_name),
+		.flags = CLK_SET_RATE_PARENT,
+	};
+	unsigned i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(cdce->divider); ++i) {
+		unsigned val;
+
+		ret = cdce706_reg_read(cdce, CDCE706_DIVIDER_PLL(i), &val);
+		if (ret < 0)
+			return ret;
+		cdce->divider[i].parent =
+			(val & CDCE706_DIVIDER_PLL_MASK(i)) >>
+			CDCE706_DIVIDER_PLL_SHIFT(i);
+
+		ret = cdce706_reg_read(cdce, CDCE706_DIVIDER(i), &val);
+		if (ret < 0)
+			return ret;
+		cdce->divider[i].div = val & CDCE706_DIVIDER_DIVIDER_MASK;
+		dev_dbg(&cdce->client->dev,
+			"%s: i: %u, parent: %u, div: %u\n", __func__, i,
+			cdce->divider[i].parent, cdce->divider[i].div);
+	}
+
+	ret = cdce706_register_hw(cdce, cdce->divider,
+				  ARRAY_SIZE(cdce->divider),
+				  cdce706_divider_name, &init);
+	return ret;
+}
+
+static int cdce706_register_clkouts(struct cdce706_dev_data *cdce)
+{
+	struct clk_init_data init = {
+		.ops = &cdce706_clkout_ops,
+		.parent_names = cdce706_divider_name,
+		.num_parents = ARRAY_SIZE(cdce706_divider_name),
+		.flags = CLK_SET_RATE_PARENT,
+	};
+	unsigned i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i) {
+		unsigned val;
+
+		ret = cdce706_reg_read(cdce, CDCE706_CLKOUT(i), &val);
+		if (ret < 0)
+			return ret;
+		cdce->clkout[i].parent = val & CDCE706_CLKOUT_DIVIDER_MASK;
+		dev_dbg(&cdce->client->dev,
+			"%s: i: %u, parent: %u\n", __func__, i,
+			cdce->clkout[i].parent);
+	}
+
+	ret = cdce706_register_hw(cdce, cdce->clkout,
+				  ARRAY_SIZE(cdce->clkout),
+				  cdce706_clkout_name, &init);
+	for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i)
+		cdce->clks[i] = cdce->clkout[i].clk;
+
+	return ret;
+}
+
+static int cdce706_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+	struct cdce706_dev_data *cdce;
+	int ret;
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -EIO;
+
+	cdce = devm_kzalloc(&client->dev, sizeof(*cdce), GFP_KERNEL);
+	if (!cdce)
+		return -ENOMEM;
+
+	cdce->client = client;
+	cdce->regmap = devm_regmap_init_i2c(client, &cdce706_regmap_config);
+	if (IS_ERR(cdce->regmap)) {
+		dev_err(&client->dev, "Failed to initialize regmap\n");
+		return -EINVAL;
+	}
+
+	i2c_set_clientdata(client, cdce);
+
+	ret = cdce706_register_clkin(cdce);
+	if (ret < 0)
+		return ret;
+	ret = cdce706_register_plls(cdce);
+	if (ret < 0)
+		return ret;
+	ret = cdce706_register_dividers(cdce);
+	if (ret < 0)
+		return ret;
+	ret = cdce706_register_clkouts(cdce);
+	if (ret < 0)
+		return ret;
+	cdce->onecell.clks = cdce->clks;
+	cdce->onecell.clk_num = ARRAY_SIZE(cdce->clks);
+	ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
+				  &cdce->onecell);
+
+	return ret;
+}
+
+static int cdce706_remove(struct i2c_client *client)
+{
+	return 0;
+}
+
+
+#ifdef CONFIG_OF
+static const struct of_device_id cdce706_dt_match[] = {
+	{ .compatible = "ti,cdce706" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, cdce706_dt_match);
+#endif
+
+static const struct i2c_device_id cdce706_id[] = {
+	{ "cdce706", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, cdce706_id);
+
+static struct i2c_driver cdce706_i2c_driver = {
+	.driver	= {
+		.name	= "cdce706",
+		.of_match_table = of_match_ptr(cdce706_dt_match),
+	},
+	.probe		= cdce706_probe,
+	.remove		= cdce706_remove,
+	.id_table	= cdce706_id,
+};
+module_i2c_driver(cdce706_i2c_driver);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_DESCRIPTION("TI CDCE 706 clock synthesizer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 4386697..956b7e5 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -27,7 +27,7 @@
 	const struct clk_ops *mux_ops = composite->mux_ops;
 	struct clk_hw *mux_hw = composite->mux_hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	return mux_ops->get_parent(mux_hw);
 }
@@ -38,7 +38,7 @@
 	const struct clk_ops *mux_ops = composite->mux_ops;
 	struct clk_hw *mux_hw = composite->mux_hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	return mux_ops->set_parent(mux_hw, index);
 }
@@ -50,12 +50,14 @@
 	const struct clk_ops *rate_ops = composite->rate_ops;
 	struct clk_hw *rate_hw = composite->rate_hw;
 
-	rate_hw->clk = hw->clk;
+	__clk_hw_set_clk(rate_hw, hw);
 
 	return rate_ops->recalc_rate(rate_hw, parent_rate);
 }
 
 static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long min_rate,
+					unsigned long max_rate,
 					unsigned long *best_parent_rate,
 					struct clk_hw **best_parent_p)
 {
@@ -72,8 +74,10 @@
 	int i;
 
 	if (rate_hw && rate_ops && rate_ops->determine_rate) {
-		rate_hw->clk = hw->clk;
-		return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
+		__clk_hw_set_clk(rate_hw, hw);
+		return rate_ops->determine_rate(rate_hw, rate, min_rate,
+						max_rate,
+						best_parent_rate,
 						best_parent_p);
 	} else if (rate_hw && rate_ops && rate_ops->round_rate &&
 		   mux_hw && mux_ops && mux_ops->set_parent) {
@@ -116,8 +120,9 @@
 
 		return best_rate;
 	} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
-		mux_hw->clk = hw->clk;
-		return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
+		__clk_hw_set_clk(mux_hw, hw);
+		return mux_ops->determine_rate(mux_hw, rate, min_rate,
+					       max_rate, best_parent_rate,
 					       best_parent_p);
 	} else {
 		pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
@@ -132,7 +137,7 @@
 	const struct clk_ops *rate_ops = composite->rate_ops;
 	struct clk_hw *rate_hw = composite->rate_hw;
 
-	rate_hw->clk = hw->clk;
+	__clk_hw_set_clk(rate_hw, hw);
 
 	return rate_ops->round_rate(rate_hw, rate, prate);
 }
@@ -144,7 +149,7 @@
 	const struct clk_ops *rate_ops = composite->rate_ops;
 	struct clk_hw *rate_hw = composite->rate_hw;
 
-	rate_hw->clk = hw->clk;
+	__clk_hw_set_clk(rate_hw, hw);
 
 	return rate_ops->set_rate(rate_hw, rate, parent_rate);
 }
@@ -155,7 +160,7 @@
 	const struct clk_ops *gate_ops = composite->gate_ops;
 	struct clk_hw *gate_hw = composite->gate_hw;
 
-	gate_hw->clk = hw->clk;
+	__clk_hw_set_clk(gate_hw, hw);
 
 	return gate_ops->is_enabled(gate_hw);
 }
@@ -166,7 +171,7 @@
 	const struct clk_ops *gate_ops = composite->gate_ops;
 	struct clk_hw *gate_hw = composite->gate_hw;
 
-	gate_hw->clk = hw->clk;
+	__clk_hw_set_clk(gate_hw, hw);
 
 	return gate_ops->enable(gate_hw);
 }
@@ -177,7 +182,7 @@
 	const struct clk_ops *gate_ops = composite->gate_ops;
 	struct clk_hw *gate_hw = composite->gate_hw;
 
-	gate_hw->clk = hw->clk;
+	__clk_hw_set_clk(gate_hw, hw);
 
 	gate_ops->disable(gate_hw);
 }
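
The clk-composite changes are mechanical but worth decoding. Later in this series struct clk is split into a per-user struct clk and a shared struct clk_core (see the clk.c hunks below), so a struct clk_hw ends up carrying two back-pointers, and copying only ->clk into the sub-component's clk_hw is no longer enough. __clk_hw_set_clk() copies both; it is defined in the clk core's private header elsewhere in the series, roughly along these lines (a sketch from memory, not quoted from this patch):

static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
{
	dst->clk = src->clk;	/* per-user handle */
	dst->core = src->core;	/* shared clk_core state */
}

The other change here simply threads the new min_rate/max_rate arguments through determine_rate so that composite clocks keep honouring per-user rate boundaries.
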
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index c0a842b..25006a8 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -30,7 +30,7 @@
 
 #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
-#define div_mask(d)	((1 << ((d)->width)) - 1)
+#define div_mask(width)	((1 << (width)) - 1)
 
 static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
 {
@@ -54,15 +54,16 @@
 	return mindiv;
 }
 
-static unsigned int _get_maxdiv(struct clk_divider *divider)
+static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
+				unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_ONE_BASED)
-		return div_mask(divider);
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
-		return 1 << div_mask(divider);
-	if (divider->table)
-		return _get_table_maxdiv(divider->table);
-	return div_mask(divider) + 1;
+	if (flags & CLK_DIVIDER_ONE_BASED)
+		return div_mask(width);
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
+		return 1 << div_mask(width);
+	if (table)
+		return _get_table_maxdiv(table);
+	return div_mask(width) + 1;
 }
 
 static unsigned int _get_table_div(const struct clk_div_table *table,
@@ -76,14 +77,15 @@
 	return 0;
 }
 
-static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+static unsigned int _get_div(const struct clk_div_table *table,
+			     unsigned int val, unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_ONE_BASED)
+	if (flags & CLK_DIVIDER_ONE_BASED)
 		return val;
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		return 1 << val;
-	if (divider->table)
-		return _get_table_div(divider->table, val);
+	if (table)
+		return _get_table_div(table, val);
 	return val + 1;
 }
 
@@ -98,29 +100,28 @@
 	return 0;
 }
 
-static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
+static unsigned int _get_val(const struct clk_div_table *table,
+			     unsigned int div, unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_ONE_BASED)
+	if (flags & CLK_DIVIDER_ONE_BASED)
 		return div;
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		return __ffs(div);
-	if (divider->table)
-		return  _get_table_val(divider->table, div);
+	if (table)
+		return  _get_table_val(table, div);
 	return div - 1;
 }
 
-static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
-		unsigned long parent_rate)
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+				  unsigned int val,
+				  const struct clk_div_table *table,
+				  unsigned long flags)
 {
-	struct clk_divider *divider = to_clk_divider(hw);
-	unsigned int div, val;
+	unsigned int div;
 
-	val = clk_readl(divider->reg) >> divider->shift;
-	val &= div_mask(divider);
-
-	div = _get_div(divider, val);
+	div = _get_div(table, val, flags);
 	if (!div) {
-		WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+		WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
 			"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
 			__clk_get_name(hw->clk));
 		return parent_rate;
@@ -128,12 +129,20 @@
 
 	return DIV_ROUND_UP(parent_rate, div);
 }
+EXPORT_SYMBOL_GPL(divider_recalc_rate);
 
-/*
- * The reverse of DIV_ROUND_UP: The maximum number which
- * divided by m is r
- */
-#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
+static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+	unsigned int val;
+
+	val = clk_readl(divider->reg) >> divider->shift;
+	val &= div_mask(divider->width);
+
+	return divider_recalc_rate(hw, parent_rate, val, divider->table,
+				   divider->flags);
+}
 
 static bool _is_valid_table_div(const struct clk_div_table *table,
 							 unsigned int div)
@@ -146,12 +155,13 @@
 	return false;
 }
 
-static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
+			  unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		return is_power_of_2(div);
-	if (divider->table)
-		return _is_valid_table_div(divider->table, div);
+	if (table)
+		return _is_valid_table_div(table, div);
 	return true;
 }
 
@@ -191,71 +201,81 @@
 	return down;
 }
 
-static int _div_round_up(struct clk_divider *divider,
-		unsigned long parent_rate, unsigned long rate)
+static int _div_round_up(const struct clk_div_table *table,
+			 unsigned long parent_rate, unsigned long rate,
+			 unsigned long flags)
 {
 	int div = DIV_ROUND_UP(parent_rate, rate);
 
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		div = __roundup_pow_of_two(div);
-	if (divider->table)
-		div = _round_up_table(divider->table, div);
+	if (table)
+		div = _round_up_table(table, div);
 
 	return div;
 }
 
-static int _div_round_closest(struct clk_divider *divider,
-		unsigned long parent_rate, unsigned long rate)
+static int _div_round_closest(const struct clk_div_table *table,
+			      unsigned long parent_rate, unsigned long rate,
+			      unsigned long flags)
 {
-	int up, down, div;
+	int up, down;
+	unsigned long up_rate, down_rate;
 
-	up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
+	up = DIV_ROUND_UP(parent_rate, rate);
+	down = parent_rate / rate;
 
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) {
-		up = __roundup_pow_of_two(div);
-		down = __rounddown_pow_of_two(div);
-	} else if (divider->table) {
-		up = _round_up_table(divider->table, div);
-		down = _round_down_table(divider->table, div);
+	if (flags & CLK_DIVIDER_POWER_OF_TWO) {
+		up = __roundup_pow_of_two(up);
+		down = __rounddown_pow_of_two(down);
+	} else if (table) {
+		up = _round_up_table(table, up);
+		down = _round_down_table(table, down);
 	}
 
-	return (up - div) <= (div - down) ? up : down;
+	up_rate = DIV_ROUND_UP(parent_rate, up);
+	down_rate = DIV_ROUND_UP(parent_rate, down);
+
+	return (rate - up_rate) <= (down_rate - rate) ? up : down;
 }
 
-static int _div_round(struct clk_divider *divider, unsigned long parent_rate,
-		unsigned long rate)
+static int _div_round(const struct clk_div_table *table,
+		      unsigned long parent_rate, unsigned long rate,
+		      unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
-		return _div_round_closest(divider, parent_rate, rate);
+	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+		return _div_round_closest(table, parent_rate, rate, flags);
 
-	return _div_round_up(divider, parent_rate, rate);
+	return _div_round_up(table, parent_rate, rate, flags);
 }
 
-static bool _is_best_div(struct clk_divider *divider,
-		unsigned long rate, unsigned long now, unsigned long best)
+static bool _is_best_div(unsigned long rate, unsigned long now,
+			 unsigned long best, unsigned long flags)
 {
-	if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
+	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
 		return abs(rate - now) < abs(rate - best);
 
 	return now <= rate && now > best;
 }
 
-static int _next_div(struct clk_divider *divider, int div)
+static int _next_div(const struct clk_div_table *table, int div,
+		     unsigned long flags)
 {
 	div++;
 
-	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		return __roundup_pow_of_two(div);
-	if (divider->table)
-		return _round_up_table(divider->table, div);
+	if (table)
+		return _round_up_table(table, div);
 
 	return div;
 }
 
 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
-		unsigned long *best_parent_rate)
+			       unsigned long *best_parent_rate,
+			       const struct clk_div_table *table, u8 width,
+			       unsigned long flags)
 {
-	struct clk_divider *divider = to_clk_divider(hw);
 	int i, bestdiv = 0;
 	unsigned long parent_rate, best = 0, now, maxdiv;
 	unsigned long parent_rate_saved = *best_parent_rate;
@@ -263,19 +283,11 @@
 	if (!rate)
 		rate = 1;
 
-	/* if read only, just return current value */
-	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
-		bestdiv = readl(divider->reg) >> divider->shift;
-		bestdiv &= div_mask(divider);
-		bestdiv = _get_div(divider, bestdiv);
-		return bestdiv;
-	}
-
-	maxdiv = _get_maxdiv(divider);
+	maxdiv = _get_maxdiv(table, width, flags);
 
 	if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
 		parent_rate = *best_parent_rate;
-		bestdiv = _div_round(divider, parent_rate, rate);
+		bestdiv = _div_round(table, parent_rate, rate, flags);
 		bestdiv = bestdiv == 0 ? 1 : bestdiv;
 		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
 		return bestdiv;
@@ -287,8 +299,8 @@
 	 */
 	maxdiv = min(ULONG_MAX / rate, maxdiv);
 
-	for (i = 1; i <= maxdiv; i = _next_div(divider, i)) {
-		if (!_is_valid_div(divider, i))
+	for (i = 1; i <= maxdiv; i = _next_div(table, i, flags)) {
+		if (!_is_valid_div(table, i, flags))
 			continue;
 		if (rate * i == parent_rate_saved) {
 			/*
@@ -300,9 +312,9 @@
 			return i;
 		}
 		parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
-				MULT_ROUND_UP(rate, i));
+					       rate * i);
 		now = DIV_ROUND_UP(parent_rate, i);
-		if (_is_best_div(divider, rate, now, best)) {
+		if (_is_best_div(rate, now, best, flags)) {
 			bestdiv = i;
 			best = now;
 			*best_parent_rate = parent_rate;
@@ -310,48 +322,82 @@
 	}
 
 	if (!bestdiv) {
-		bestdiv = _get_maxdiv(divider);
+		bestdiv = _get_maxdiv(table, width, flags);
 		*best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
 	}
 
 	return bestdiv;
 }
 
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long *prate)
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long *prate, const struct clk_div_table *table,
+			u8 width, unsigned long flags)
 {
 	int div;
-	div = clk_divider_bestdiv(hw, rate, prate);
+
+	div = clk_divider_bestdiv(hw, rate, prate, table, width, flags);
 
 	return DIV_ROUND_UP(*prate, div);
 }
+EXPORT_SYMBOL_GPL(divider_round_rate);
+
+static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *prate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+	int bestdiv;
+
+	/* if read only, just return current value */
+	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+		bestdiv = readl(divider->reg) >> divider->shift;
+		bestdiv &= div_mask(divider->width);
+		bestdiv = _get_div(divider->table, bestdiv, divider->flags);
+		return DIV_ROUND_UP(*prate, bestdiv);
+	}
+
+	return divider_round_rate(hw, rate, prate, divider->table,
+				  divider->width, divider->flags);
+}
+
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+		    const struct clk_div_table *table, u8 width,
+		    unsigned long flags)
+{
+	unsigned int div, value;
+
+	div = DIV_ROUND_UP(parent_rate, rate);
+
+	if (!_is_valid_div(table, div, flags))
+		return -EINVAL;
+
+	value = _get_val(table, div, flags);
+
+	return min_t(unsigned int, value, div_mask(width));
+}
+EXPORT_SYMBOL_GPL(divider_get_val);
 
 static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 				unsigned long parent_rate)
 {
 	struct clk_divider *divider = to_clk_divider(hw);
-	unsigned int div, value;
+	int value;
 	unsigned long flags = 0;
 	u32 val;
 
-	div = DIV_ROUND_UP(parent_rate, rate);
-
-	if (!_is_valid_div(divider, div))
-		return -EINVAL;
-
-	value = _get_val(divider, div);
-
-	if (value > div_mask(divider))
-		value = div_mask(divider);
+	value = divider_get_val(rate, parent_rate, divider->table,
+				divider->width, divider->flags);
+
+	if (value < 0)
+		return value;
 
 	if (divider->lock)
 		spin_lock_irqsave(divider->lock, flags);
 
 	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
-		val = div_mask(divider) << (divider->shift + 16);
+		val = div_mask(divider->width) << (divider->shift + 16);
 	} else {
 		val = clk_readl(divider->reg);
-		val &= ~(div_mask(divider) << divider->shift);
+		val &= ~(div_mask(divider->width) << divider->shift);
 	}
 	val |= value << divider->shift;
 	clk_writel(val, divider->reg);
@@ -463,3 +509,19 @@
 			width, clk_divider_flags, table, lock);
 }
 EXPORT_SYMBOL_GPL(clk_register_divider_table);
+
+void clk_unregister_divider(struct clk *clk)
+{
+	struct clk_divider *div;
+	struct clk_hw *hw;
+
+	hw = __clk_get_hw(clk);
+	if (!hw)
+		return;
+
+	div = to_clk_divider(hw);
+
+	clk_unregister(clk);
+	kfree(div);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_divider);
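
The divider rework above is an API extraction: the rate math that used to be private to clk_divider_ops is now exported as divider_recalc_rate(), divider_round_rate() and divider_get_val(), each taking the table/width/flags explicitly so drivers that own their register access (regmap, non-MMIO buses, packed composite registers) can reuse the math. A hedged sketch of such reuse; my_read()/my_write() and the field geometry are invented for illustration:

#include <linux/clk-provider.h>

/* Hypothetical register accessors for a divider that is not plain MMIO. */
static u32 my_read(struct clk_hw *hw);
static void my_write(struct clk_hw *hw, u32 val);

#define MY_DIV_SHIFT	8
#define MY_DIV_WIDTH	4

static unsigned long my_div_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	unsigned int val;

	val = (my_read(hw) >> MY_DIV_SHIFT) & ((1 << MY_DIV_WIDTH) - 1);

	/* table == NULL, flags == 0: plain "div = val + 1" semantics */
	return divider_recalc_rate(hw, parent_rate, val, NULL, 0);
}

static long my_div_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *prate)
{
	return divider_round_rate(hw, rate, prate, NULL, MY_DIV_WIDTH, 0);
}

static int my_div_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	int val = divider_get_val(rate, parent_rate, NULL, MY_DIV_WIDTH, 0);

	if (val < 0)
		return val;	/* unrepresentable divisor */

	my_write(hw, val << MY_DIV_SHIFT);
	return 0;
}
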
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 51fd87f..3f0e420 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -128,7 +128,7 @@
 	struct clk_init_data init;
 
 	if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
-		if (bit_idx > 16) {
+		if (bit_idx > 15) {
 			pr_err("gate bit exceeds LOWORD field\n");
 			return ERR_PTR(-EINVAL);
 		}
@@ -162,3 +162,19 @@
 	return clk;
 }
 EXPORT_SYMBOL_GPL(clk_register_gate);
+
+void clk_unregister_gate(struct clk *clk)
+{
+	struct clk_gate *gate;
+	struct clk_hw *hw;
+
+	hw = __clk_get_hw(clk);
+	if (!hw)
+		return;
+
+	gate = to_clk_gate(hw);
+
+	clk_unregister(clk);
+	kfree(gate);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_gate);
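
Besides adding clk_unregister_gate(), the clk-gate.c hunk fixes an off-by-one in the CLK_GATE_HIWORD_MASK sanity check: in the hiword-mask register scheme the low half-word carries the value and the high half-word is a per-bit write-enable mask, so the largest legal gate bit index is 15, not 16. A minimal illustration of that write pattern (a sketch, not quoted from clk-gate.c):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

/*
 * Hiword-mask write: bit_idx selects the value bit in [15:0] and the
 * matching bit in [31:16] tells the hardware this write may change it,
 * which is why bit_idx must stay <= 15.
 */
static void example_hiword_gate(void __iomem *reg, unsigned int bit_idx,
				bool enable)
{
	u32 val = BIT(bit_idx + 16);

	if (enable)
		val |= BIT(bit_idx);

	writel(val, reg);
}
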
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 6e1ecf9..69a094c 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -177,3 +177,19 @@
 				      NULL, lock);
 }
 EXPORT_SYMBOL_GPL(clk_register_mux);
+
+void clk_unregister_mux(struct clk *clk)
+{
+	struct clk_mux *mux;
+	struct clk_hw *hw;
+
+	hw = __clk_get_hw(clk);
+	if (!hw)
+		return;
+
+	mux = to_clk_mux(hw);
+
+	clk_unregister(clk);
+	kfree(mux);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_mux);
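
clk-divider.c, clk-gate.c and clk-mux.c each gain a matching clk_unregister_*() helper: the clk_register_divider()/clk_register_gate()/clk_register_mux() constructors kzalloc a basic-type wrapper around the clk_hw, and until now there was no counterpart that both unregistered the clock and freed that wrapper. A hedged sketch of the teardown path this enables (struct my_priv and its members are invented for illustration):

#include <linux/clk-provider.h>

struct my_priv {
	struct clk *mux_clk;
	struct clk *div_clk;
	struct clk *gate_clk;
};

/* Undo clk_register_mux()/_divider()/_gate() without leaking the wrappers. */
static void my_clk_teardown(struct my_priv *priv)
{
	clk_unregister_gate(priv->gate_clk);
	clk_unregister_divider(priv->div_clk);
	clk_unregister_mux(priv->mux_clk);
}
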
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
deleted file mode 100644
index 0a47d6f..0000000
--- a/drivers/clk/clk-ppc-corenet.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * clock driver for Freescale PowerPC corenet SoCs.
- */
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-
-struct cmux_clk {
-	struct clk_hw hw;
-	void __iomem *reg;
-	u32 flags;
-};
-
-#define PLL_KILL			BIT(31)
-#define	CLKSEL_SHIFT		27
-#define CLKSEL_ADJUST		BIT(0)
-#define to_cmux_clk(p)		container_of(p, struct cmux_clk, hw)
-
-static unsigned int clocks_per_pll;
-
-static int cmux_set_parent(struct clk_hw *hw, u8 idx)
-{
-	struct cmux_clk *clk = to_cmux_clk(hw);
-	u32 clksel;
-
-	clksel = ((idx / clocks_per_pll) << 2) + idx % clocks_per_pll;
-	if (clk->flags & CLKSEL_ADJUST)
-		clksel += 8;
-	clksel = (clksel & 0xf) << CLKSEL_SHIFT;
-	iowrite32be(clksel, clk->reg);
-
-	return 0;
-}
-
-static u8 cmux_get_parent(struct clk_hw *hw)
-{
-	struct cmux_clk *clk = to_cmux_clk(hw);
-	u32 clksel;
-
-	clksel = ioread32be(clk->reg);
-	clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
-	if (clk->flags & CLKSEL_ADJUST)
-		clksel -= 8;
-	clksel = (clksel >> 2) * clocks_per_pll + clksel % 4;
-
-	return clksel;
-}
-
-const struct clk_ops cmux_ops = {
-	.get_parent = cmux_get_parent,
-	.set_parent = cmux_set_parent,
-};
-
-static void __init core_mux_init(struct device_node *np)
-{
-	struct clk *clk;
-	struct clk_init_data init;
-	struct cmux_clk *cmux_clk;
-	struct device_node *node;
-	int rc, count, i;
-	u32	offset;
-	const char *clk_name;
-	const char **parent_names;
-
-	rc = of_property_read_u32(np, "reg", &offset);
-	if (rc) {
-		pr_err("%s: could not get reg property\n", np->name);
-		return;
-	}
-
-	/* get the input clock source count */
-	count = of_property_count_strings(np, "clock-names");
-	if (count < 0) {
-		pr_err("%s: get clock count error\n", np->name);
-		return;
-	}
-	parent_names = kzalloc((sizeof(char *) * count), GFP_KERNEL);
-	if (!parent_names) {
-		pr_err("%s: could not allocate parent_names\n", __func__);
-		return;
-	}
-
-	for (i = 0; i < count; i++)
-		parent_names[i] = of_clk_get_parent_name(np, i);
-
-	cmux_clk = kzalloc(sizeof(struct cmux_clk), GFP_KERNEL);
-	if (!cmux_clk) {
-		pr_err("%s: could not allocate cmux_clk\n", __func__);
-		goto err_name;
-	}
-	cmux_clk->reg = of_iomap(np, 0);
-	if (!cmux_clk->reg) {
-		pr_err("%s: could not map register\n", __func__);
-		goto err_clk;
-	}
-
-	node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
-	if (node && (offset >= 0x80))
-		cmux_clk->flags = CLKSEL_ADJUST;
-
-	rc = of_property_read_string_index(np, "clock-output-names",
-			0, &clk_name);
-	if (rc) {
-		pr_err("%s: read clock names error\n", np->name);
-		goto err_clk;
-	}
-
-	init.name = clk_name;
-	init.ops = &cmux_ops;
-	init.parent_names = parent_names;
-	init.num_parents = count;
-	init.flags = 0;
-	cmux_clk->hw.init = &init;
-
-	clk = clk_register(NULL, &cmux_clk->hw);
-	if (IS_ERR(clk)) {
-		pr_err("%s: could not register clock\n", clk_name);
-		goto err_clk;
-	}
-
-	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
-	if (rc) {
-		pr_err("Could not register clock provider for node:%s\n",
-			 np->name);
-		goto err_clk;
-	}
-	goto err_name;
-
-err_clk:
-	kfree(cmux_clk);
-err_name:
-	/* free *_names because they are reallocated when registered */
-	kfree(parent_names);
-}
-
-static void __init core_pll_init(struct device_node *np)
-{
-	u32 mult;
-	int i, rc, count;
-	const char *clk_name, *parent_name;
-	struct clk_onecell_data *onecell_data;
-	struct clk      **subclks;
-	void __iomem *base;
-
-	base = of_iomap(np, 0);
-	if (!base) {
-		pr_err("clk-ppc: iomap error\n");
-		return;
-	}
-
-	/* get the multiple of PLL */
-	mult = ioread32be(base);
-
-	/* check if this PLL is disabled */
-	if (mult & PLL_KILL) {
-		pr_debug("PLL:%s is disabled\n", np->name);
-		goto err_map;
-	}
-	mult = (mult >> 1) & 0x3f;
-
-	parent_name = of_clk_get_parent_name(np, 0);
-	if (!parent_name) {
-		pr_err("PLL: %s must have a parent\n", np->name);
-		goto err_map;
-	}
-
-	count = of_property_count_strings(np, "clock-output-names");
-	if (count < 0 || count > 4) {
-		pr_err("%s: clock is not supported\n", np->name);
-		goto err_map;
-	}
-
-	/* output clock number per PLL */
-	clocks_per_pll = count;
-
-	subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL);
-	if (!subclks) {
-		pr_err("%s: could not allocate subclks\n", __func__);
-		goto err_map;
-	}
-
-	onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
-	if (!onecell_data) {
-		pr_err("%s: could not allocate onecell_data\n", __func__);
-		goto err_clks;
-	}
-
-	for (i = 0; i < count; i++) {
-		rc = of_property_read_string_index(np, "clock-output-names",
-				i, &clk_name);
-		if (rc) {
-			pr_err("%s: could not get clock names\n", np->name);
-			goto err_cell;
-		}
-
-		/*
-		 * when count == 4, there are 4 output clocks:
-		 * /1, /2, /3, /4 respectively
-		 * when count < 4, there are at least 2 output clocks:
-		 * /1, /2, (/4, if count == 3) respectively.
-		 */
-		if (count == 4)
-			subclks[i] = clk_register_fixed_factor(NULL, clk_name,
-					parent_name, 0, mult, 1 + i);
-		else
-
-			subclks[i] = clk_register_fixed_factor(NULL, clk_name,
-					parent_name, 0, mult, 1 << i);
-
-		if (IS_ERR(subclks[i])) {
-			pr_err("%s: could not register clock\n", clk_name);
-			goto err_cell;
-		}
-	}
-
-	onecell_data->clks = subclks;
-	onecell_data->clk_num = count;
-
-	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
-	if (rc) {
-		pr_err("Could not register clk provider for node:%s\n",
-			 np->name);
-		goto err_cell;
-	}
-
-	iounmap(base);
-	return;
-err_cell:
-	kfree(onecell_data);
-err_clks:
-	kfree(subclks);
-err_map:
-	iounmap(base);
-}
-
-static void __init sysclk_init(struct device_node *node)
-{
-	struct clk *clk;
-	const char *clk_name = node->name;
-	struct device_node *np = of_get_parent(node);
-	u32 rate;
-
-	if (!np) {
-		pr_err("ppc-clk: could not get parent node\n");
-		return;
-	}
-
-	if (of_property_read_u32(np, "clock-frequency", &rate)) {
-		of_node_put(node);
-		return;
-	}
-
-	of_property_read_string(np, "clock-output-names", &clk_name);
-
-	clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
-	if (!IS_ERR(clk))
-		of_clk_add_provider(np, of_clk_src_simple_get, clk);
-}
-
-static const struct of_device_id clk_match[] __initconst = {
-	{ .compatible = "fsl,qoriq-sysclk-1.0", .data = sysclk_init, },
-	{ .compatible = "fsl,qoriq-sysclk-2.0", .data = sysclk_init, },
-	{ .compatible = "fsl,qoriq-core-pll-1.0", .data = core_pll_init, },
-	{ .compatible = "fsl,qoriq-core-pll-2.0", .data = core_pll_init, },
-	{ .compatible = "fsl,qoriq-core-mux-1.0", .data = core_mux_init, },
-	{ .compatible = "fsl,qoriq-core-mux-2.0", .data = core_mux_init, },
-	{}
-};
-
-static int __init ppc_corenet_clk_probe(struct platform_device *pdev)
-{
-	of_clk_init(clk_match);
-
-	return 0;
-}
-
-static const struct of_device_id ppc_clk_ids[] __initconst = {
-	{ .compatible = "fsl,qoriq-clockgen-1.0", },
-	{ .compatible = "fsl,qoriq-clockgen-2.0", },
-	{}
-};
-
-static struct platform_driver ppc_corenet_clk_driver = {
-	.driver = {
-		.name = "ppc_corenet_clock",
-		.of_match_table = ppc_clk_ids,
-	},
-	.probe = ppc_corenet_clk_probe,
-};
-
-static int __init ppc_corenet_clk_init(void)
-{
-	return platform_driver_register(&ppc_corenet_clk_driver);
-}
-subsys_initcall(ppc_corenet_clk_init);
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
new file mode 100644
index 0000000..cda90a9
--- /dev/null
+++ b/drivers/clk/clk-qoriq.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * clock driver for Freescale QorIQ SoCs.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+struct cmux_clk {
+	struct clk_hw hw;
+	void __iomem *reg;
+	unsigned int clk_per_pll;
+	u32 flags;
+};
+
+#define PLL_KILL			BIT(31)
+#define CLKSEL_SHIFT		27
+#define CLKSEL_ADJUST		BIT(0)
+#define to_cmux_clk(p)		container_of(p, struct cmux_clk, hw)
+
+static int cmux_set_parent(struct clk_hw *hw, u8 idx)
+{
+	struct cmux_clk *clk = to_cmux_clk(hw);
+	u32 clksel;
+
+	clksel = ((idx / clk->clk_per_pll) << 2) + idx % clk->clk_per_pll;
+	if (clk->flags & CLKSEL_ADJUST)
+		clksel += 8;
+	clksel = (clksel & 0xf) << CLKSEL_SHIFT;
+	iowrite32be(clksel, clk->reg);
+
+	return 0;
+}
+
+static u8 cmux_get_parent(struct clk_hw *hw)
+{
+	struct cmux_clk *clk = to_cmux_clk(hw);
+	u32 clksel;
+
+	clksel = ioread32be(clk->reg);
+	clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
+	if (clk->flags & CLKSEL_ADJUST)
+		clksel -= 8;
+	clksel = (clksel >> 2) * clk->clk_per_pll + clksel % 4;
+
+	return clksel;
+}
+
+static const struct clk_ops cmux_ops = {
+	.get_parent = cmux_get_parent,
+	.set_parent = cmux_set_parent,
+};
+
+static void __init core_mux_init(struct device_node *np)
+{
+	struct clk *clk;
+	struct clk_init_data init;
+	struct cmux_clk *cmux_clk;
+	struct device_node *node;
+	int rc, count, i;
+	u32	offset;
+	const char *clk_name;
+	const char **parent_names;
+	struct of_phandle_args clkspec;
+
+	rc = of_property_read_u32(np, "reg", &offset);
+	if (rc) {
+		pr_err("%s: could not get reg property\n", np->name);
+		return;
+	}
+
+	/* get the input clock source count */
+	count = of_property_count_strings(np, "clock-names");
+	if (count < 0) {
+		pr_err("%s: get clock count error\n", np->name);
+		return;
+	}
+	parent_names = kcalloc(count, sizeof(char *), GFP_KERNEL);
+	if (!parent_names)
+		return;
+
+	for (i = 0; i < count; i++)
+		parent_names[i] = of_clk_get_parent_name(np, i);
+
+	cmux_clk = kzalloc(sizeof(*cmux_clk), GFP_KERNEL);
+	if (!cmux_clk)
+		goto err_name;
+
+	cmux_clk->reg = of_iomap(np, 0);
+	if (!cmux_clk->reg) {
+		pr_err("%s: could not map register\n", __func__);
+		goto err_clk;
+	}
+
+	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
+					&clkspec);
+	if (rc) {
+		pr_err("%s: parse clock node error\n", __func__);
+		goto err_clk;
+	}
+
+	cmux_clk->clk_per_pll = of_property_count_strings(clkspec.np,
+			"clock-output-names");
+	of_node_put(clkspec.np);
+
+	node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
+	if (node && (offset >= 0x80))
+		cmux_clk->flags = CLKSEL_ADJUST;
+
+	rc = of_property_read_string_index(np, "clock-output-names",
+					   0, &clk_name);
+	if (rc) {
+		pr_err("%s: read clock names error\n", np->name);
+		goto err_clk;
+	}
+
+	init.name = clk_name;
+	init.ops = &cmux_ops;
+	init.parent_names = parent_names;
+	init.num_parents = count;
+	init.flags = 0;
+	cmux_clk->hw.init = &init;
+
+	clk = clk_register(NULL, &cmux_clk->hw);
+	if (IS_ERR(clk)) {
+		pr_err("%s: could not register clock\n", clk_name);
+		goto err_clk;
+	}
+
+	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+	if (rc) {
+		pr_err("Could not register clock provider for node:%s\n",
+		       np->name);
+		goto err_clk;
+	}
+	goto err_name;
+
+err_clk:
+	kfree(cmux_clk);
+err_name:
+	/* free *_names because they are reallocated when registered */
+	kfree(parent_names);
+}
+
+static void __init core_pll_init(struct device_node *np)
+{
+	u32 mult;
+	int i, rc, count;
+	const char *clk_name, *parent_name;
+	struct clk_onecell_data *onecell_data;
+	struct clk **subclks;
+	void __iomem *base;
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		pr_err("iomap error\n");
+		return;
+	}
+
+	/* get the multiple of PLL */
+	mult = ioread32be(base);
+
+	/* check if this PLL is disabled */
+	if (mult & PLL_KILL) {
+		pr_debug("PLL:%s is disabled\n", np->name);
+		goto err_map;
+	}
+	mult = (mult >> 1) & 0x3f;
+
+	parent_name = of_clk_get_parent_name(np, 0);
+	if (!parent_name) {
+		pr_err("PLL: %s must have a parent\n", np->name);
+		goto err_map;
+	}
+
+	count = of_property_count_strings(np, "clock-output-names");
+	if (count < 0 || count > 4) {
+		pr_err("%s: clock is not supported\n", np->name);
+		goto err_map;
+	}
+
+	subclks = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
+	if (!subclks)
+		goto err_map;
+
+	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
+	if (!onecell_data)
+		goto err_clks;
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(np, "clock-output-names",
+						   i, &clk_name);
+		if (rc) {
+			pr_err("%s: could not get clock names\n", np->name);
+			goto err_cell;
+		}
+
+		/*
+		 * when count == 4, there are 4 output clocks:
+		 * /1, /2, /3, /4 respectively
+		 * when count < 4, there are at least 2 output clocks:
+		 * /1, /2, (/4, if count == 3) respectively.
+		 */
+		if (count == 4)
+			subclks[i] = clk_register_fixed_factor(NULL, clk_name,
+					parent_name, 0, mult, 1 + i);
+		else
+
+			subclks[i] = clk_register_fixed_factor(NULL, clk_name,
+					parent_name, 0, mult, 1 << i);
+
+		if (IS_ERR(subclks[i])) {
+			pr_err("%s: could not register clock\n", clk_name);
+			goto err_cell;
+		}
+	}
+
+	onecell_data->clks = subclks;
+	onecell_data->clk_num = count;
+
+	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
+	if (rc) {
+		pr_err("Could not register clk provider for node:%s\n",
+		       np->name);
+		goto err_cell;
+	}
+
+	iounmap(base);
+	return;
+err_cell:
+	kfree(onecell_data);
+err_clks:
+	kfree(subclks);
+err_map:
+	iounmap(base);
+}
+
+static void __init sysclk_init(struct device_node *node)
+{
+	struct clk *clk;
+	const char *clk_name = node->name;
+	struct device_node *np = of_get_parent(node);
+	u32 rate;
+
+	if (!np) {
+		pr_err("could not get parent node\n");
+		return;
+	}
+
+	if (of_property_read_u32(np, "clock-frequency", &rate)) {
+		of_node_put(node);
+		return;
+	}
+
+	of_property_read_string(np, "clock-output-names", &clk_name);
+
+	clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
+	if (!IS_ERR(clk))
+		of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static void __init pltfrm_pll_init(struct device_node *np)
+{
+	void __iomem *base;
+	uint32_t mult;
+	const char *parent_name, *clk_name;
+	int i, _errno;
+	struct clk_onecell_data *cod;
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
+		return;
+	}
+
+	/* Get the multiple of PLL */
+	mult = ioread32be(base);
+
+	iounmap(base);
+
+	/* Check if this PLL is disabled */
+	if (mult & PLL_KILL) {
+		pr_debug("%s(): %s: Disabled\n", __func__, np->name);
+		return;
+	}
+	mult = (mult & GENMASK(6, 1)) >> 1;
+
+	parent_name = of_clk_get_parent_name(np, 0);
+	if (!parent_name) {
+		pr_err("%s(): %s: of_clk_get_parent_name() failed\n",
+		       __func__, np->name);
+		return;
+	}
+
+	i = of_property_count_strings(np, "clock-output-names");
+	if (i < 0) {
+		pr_err("%s(): %s: of_property_count_strings(clock-output-names) = %d\n",
+		       __func__, np->name, i);
+		return;
+	}
+
+	cod = kmalloc(sizeof(*cod) + i * sizeof(struct clk *), GFP_KERNEL);
+	if (!cod)
+		return;
+	cod->clks = (struct clk **)(cod + 1);
+	cod->clk_num = i;
+
+	for (i = 0; i < cod->clk_num; i++) {
+		_errno = of_property_read_string_index(np, "clock-output-names",
+						       i, &clk_name);
+		if (_errno < 0) {
+			pr_err("%s(): %s: of_property_read_string_index(clock-output-names) = %d\n",
+			       __func__, np->name, _errno);
+			goto return_clk_unregister;
+		}
+
+		cod->clks[i] = clk_register_fixed_factor(NULL, clk_name,
+					       parent_name, 0, mult, 1 + i);
+		if (IS_ERR(cod->clks[i])) {
+			pr_err("%s(): %s: clk_register_fixed_factor(%s) = %ld\n",
+			       __func__, np->name,
+			       clk_name, PTR_ERR(cod->clks[i]));
+			goto return_clk_unregister;
+		}
+	}
+
+	_errno = of_clk_add_provider(np, of_clk_src_onecell_get, cod);
+	if (_errno < 0) {
+		pr_err("%s(): %s: of_clk_add_provider() = %d\n",
+		       __func__, np->name, _errno);
+		goto return_clk_unregister;
+	}
+
+	return;
+
+return_clk_unregister:
+	while (--i >= 0)
+		clk_unregister(cod->clks[i]);
+	kfree(cod);
+}
+
+CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
+CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
+CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
+CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
+CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
+CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
+CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
+CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);
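
For reference, the cmux parent-index encoding used by cmux_set_parent()/cmux_get_parent() above packs the PLL number into bits [3:2] of the 4-bit clksel field and the PLL's output (/1, /2, ...) into bits [1:0]; the field is then shifted up by CLKSEL_SHIFT, and the optional +8 CLKSEL_ADJUST only applies to the p4080 case. Restated on its own (the expression is copied from cmux_set_parent(); the bit-field reading is an interpretation of it): with clk_per_pll = 2, parent index 3 - PLL 1's second output - encodes to 0b0101.

/* Same encoding as cmux_set_parent(): (pll << 2) | output-within-pll. */
static unsigned int example_cmux_encode(unsigned int idx,
					unsigned int clk_per_pll)
{
	return ((idx / clk_per_pll) << 2) + idx % clk_per_pll;
}
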
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 642cf37..237f23f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -9,7 +9,7 @@
  * Standard functionality for the common clock API.  See Documentation/clk.txt
  */
 
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
 #include <linux/clk/clk-conf.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -37,6 +37,55 @@
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+static long clk_core_get_accuracy(struct clk_core *clk);
+static unsigned long clk_core_get_rate(struct clk_core *clk);
+static int clk_core_get_phase(struct clk_core *clk);
+static bool clk_core_is_prepared(struct clk_core *clk);
+static bool clk_core_is_enabled(struct clk_core *clk);
+static struct clk_core *clk_core_lookup(const char *name);
+
+/***    private data structures    ***/
+
+struct clk_core {
+	const char		*name;
+	const struct clk_ops	*ops;
+	struct clk_hw		*hw;
+	struct module		*owner;
+	struct clk_core		*parent;
+	const char		**parent_names;
+	struct clk_core		**parents;
+	u8			num_parents;
+	u8			new_parent_index;
+	unsigned long		rate;
+	unsigned long		req_rate;
+	unsigned long		new_rate;
+	struct clk_core		*new_parent;
+	struct clk_core		*new_child;
+	unsigned long		flags;
+	unsigned int		enable_count;
+	unsigned int		prepare_count;
+	unsigned long		accuracy;
+	int			phase;
+	struct hlist_head	children;
+	struct hlist_node	child_node;
+	struct hlist_node	debug_node;
+	struct hlist_head	clks;
+	unsigned int		notifier_count;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		*dentry;
+#endif
+	struct kref		ref;
+};
+
+struct clk {
+	struct clk_core	*core;
+	const char *dev_id;
+	const char *con_id;
+	unsigned long min_rate;
+	unsigned long max_rate;
+	struct hlist_node child_node;
+};
+
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
@@ -114,7 +163,8 @@
 	NULL,
 };
 
-static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
+static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+				 int level)
 {
 	if (!c)
 		return;
@@ -122,14 +172,14 @@
 	seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
 		   level * 3 + 1, "",
 		   30 - level * 3, c->name,
-		   c->enable_count, c->prepare_count, clk_get_rate(c),
-		   clk_get_accuracy(c), clk_get_phase(c));
+		   c->enable_count, c->prepare_count, clk_core_get_rate(c),
+		   clk_core_get_accuracy(c), clk_core_get_phase(c));
 }
 
-static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
+static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
 				     int level)
 {
-	struct clk *child;
+	struct clk_core *child;
 
 	if (!c)
 		return;
@@ -142,7 +192,7 @@
 
 static int clk_summary_show(struct seq_file *s, void *data)
 {
-	struct clk *c;
+	struct clk_core *c;
 	struct hlist_head **lists = (struct hlist_head **)s->private;
 
 	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
@@ -172,7 +222,7 @@
 	.release	= single_release,
 };
 
-static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
+static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
 {
 	if (!c)
 		return;
@@ -180,14 +230,14 @@
 	seq_printf(s, "\"%s\": { ", c->name);
 	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
 	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
-	seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
-	seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
-	seq_printf(s, "\"phase\": %d", clk_get_phase(c));
+	seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
+	seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
+	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
 }
 
-static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
+static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
 {
-	struct clk *child;
+	struct clk_core *child;
 
 	if (!c)
 		return;
@@ -204,7 +254,7 @@
 
 static int clk_dump(struct seq_file *s, void *data)
 {
-	struct clk *c;
+	struct clk_core *c;
 	bool first_node = true;
 	struct hlist_head **lists = (struct hlist_head **)s->private;
 
@@ -240,7 +290,7 @@
 	.release	= single_release,
 };
 
-static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
+static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry)
 {
 	struct dentry *d;
 	int ret = -ENOMEM;
@@ -315,7 +365,7 @@
  * initialized.  Otherwise it bails out early since the debugfs clk tree
  * will be created lazily by clk_debug_init as part of a late_initcall.
  */
-static int clk_debug_register(struct clk *clk)
+static int clk_debug_register(struct clk_core *clk)
 {
 	int ret = 0;
 
@@ -340,16 +390,12 @@
  * debugfs clk tree if clk->dentry points to debugfs created by
  * clk_debug_register in __clk_init.
  */
-static void clk_debug_unregister(struct clk *clk)
+static void clk_debug_unregister(struct clk_core *clk)
 {
 	mutex_lock(&clk_debug_lock);
-	if (!clk->dentry)
-		goto out;
-
 	hlist_del_init(&clk->debug_node);
 	debugfs_remove_recursive(clk->dentry);
 	clk->dentry = NULL;
-out:
 	mutex_unlock(&clk_debug_lock);
 }
 
@@ -358,8 +404,9 @@
 {
 	struct dentry *d = NULL;
 
-	if (hw->clk->dentry)
-		d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops);
+	if (hw->core->dentry)
+		d = debugfs_create_file(name, mode, hw->core->dentry, data,
+					fops);
 
 	return d;
 }
@@ -379,7 +426,7 @@
  */
 static int __init clk_debug_init(void)
 {
-	struct clk *clk;
+	struct clk_core *clk;
 	struct dentry *d;
 
 	rootdir = debugfs_create_dir("clk", NULL);
@@ -418,22 +465,20 @@
 }
 late_initcall(clk_debug_init);
 #else
-static inline int clk_debug_register(struct clk *clk) { return 0; }
-static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+static inline int clk_debug_register(struct clk_core *clk) { return 0; }
+static inline void clk_debug_reparent(struct clk_core *clk,
+				      struct clk_core *new_parent)
 {
 }
-static inline void clk_debug_unregister(struct clk *clk)
+static inline void clk_debug_unregister(struct clk_core *clk)
 {
 }
 #endif
 
 /* caller must hold prepare_lock */
-static void clk_unprepare_unused_subtree(struct clk *clk)
+static void clk_unprepare_unused_subtree(struct clk_core *clk)
 {
-	struct clk *child;
-
-	if (!clk)
-		return;
+	struct clk_core *child;
 
 	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_unprepare_unused_subtree(child);
@@ -444,7 +489,7 @@
 	if (clk->flags & CLK_IGNORE_UNUSED)
 		return;
 
-	if (__clk_is_prepared(clk)) {
+	if (clk_core_is_prepared(clk)) {
 		if (clk->ops->unprepare_unused)
 			clk->ops->unprepare_unused(clk->hw);
 		else if (clk->ops->unprepare)
@@ -453,14 +498,11 @@
 }
 
 /* caller must hold prepare_lock */
-static void clk_disable_unused_subtree(struct clk *clk)
+static void clk_disable_unused_subtree(struct clk_core *clk)
 {
-	struct clk *child;
+	struct clk_core *child;
 	unsigned long flags;
 
-	if (!clk)
-		goto out;
-
 	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_disable_unused_subtree(child);
 
@@ -477,7 +519,7 @@
 	 * sequence.  call .disable_unused if available, otherwise fall
 	 * back to .disable
 	 */
-	if (__clk_is_enabled(clk)) {
+	if (clk_core_is_enabled(clk)) {
 		if (clk->ops->disable_unused)
 			clk->ops->disable_unused(clk->hw);
 		else if (clk->ops->disable)
@@ -486,9 +528,6 @@
 
 unlock_out:
 	clk_enable_unlock(flags);
-
-out:
-	return;
 }
 
 static bool clk_ignore_unused;
@@ -501,7 +540,7 @@
 
 static int clk_disable_unused(void)
 {
-	struct clk *clk;
+	struct clk_core *clk;
 
 	if (clk_ignore_unused) {
 		pr_warn("clk: Not disabling unused clocks\n");
@@ -532,48 +571,65 @@
 
 const char *__clk_get_name(struct clk *clk)
 {
-	return !clk ? NULL : clk->name;
+	return !clk ? NULL : clk->core->name;
 }
 EXPORT_SYMBOL_GPL(__clk_get_name);
 
 struct clk_hw *__clk_get_hw(struct clk *clk)
 {
-	return !clk ? NULL : clk->hw;
+	return !clk ? NULL : clk->core->hw;
 }
 EXPORT_SYMBOL_GPL(__clk_get_hw);
 
 u8 __clk_get_num_parents(struct clk *clk)
 {
-	return !clk ? 0 : clk->num_parents;
+	return !clk ? 0 : clk->core->num_parents;
 }
 EXPORT_SYMBOL_GPL(__clk_get_num_parents);
 
 struct clk *__clk_get_parent(struct clk *clk)
 {
-	return !clk ? NULL : clk->parent;
+	if (!clk)
+		return NULL;
+
+	/* TODO: Create a per-user clk and change callers to call clk_put */
+	return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
 }
 EXPORT_SYMBOL_GPL(__clk_get_parent);
 
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk,
+							 u8 index)
 {
 	if (!clk || index >= clk->num_parents)
 		return NULL;
 	else if (!clk->parents)
-		return __clk_lookup(clk->parent_names[index]);
+		return clk_core_lookup(clk->parent_names[index]);
 	else if (!clk->parents[index])
 		return clk->parents[index] =
-			__clk_lookup(clk->parent_names[index]);
+			clk_core_lookup(clk->parent_names[index]);
 	else
 		return clk->parents[index];
 }
+
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+	struct clk_core *parent;
+
+	if (!clk)
+		return NULL;
+
+	parent = clk_core_get_parent_by_index(clk->core, index);
+
+	return !parent ? NULL : parent->hw->clk;
+}
 EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
 
 unsigned int __clk_get_enable_count(struct clk *clk)
 {
-	return !clk ? 0 : clk->enable_count;
+	return !clk ? 0 : clk->core->enable_count;
 }
 
-unsigned long __clk_get_rate(struct clk *clk)
+static unsigned long clk_core_get_rate_nolock(struct clk_core *clk)
 {
 	unsigned long ret;
 
@@ -593,9 +649,17 @@
 out:
 	return ret;
 }
+
+unsigned long __clk_get_rate(struct clk *clk)
+{
+	if (!clk)
+		return 0;
+
+	return clk_core_get_rate_nolock(clk->core);
+}
 EXPORT_SYMBOL_GPL(__clk_get_rate);
 
-static unsigned long __clk_get_accuracy(struct clk *clk)
+static unsigned long __clk_get_accuracy(struct clk_core *clk)
 {
 	if (!clk)
 		return 0;
@@ -605,11 +669,11 @@
 
 unsigned long __clk_get_flags(struct clk *clk)
 {
-	return !clk ? 0 : clk->flags;
+	return !clk ? 0 : clk->core->flags;
 }
 EXPORT_SYMBOL_GPL(__clk_get_flags);
 
-bool __clk_is_prepared(struct clk *clk)
+static bool clk_core_is_prepared(struct clk_core *clk)
 {
 	int ret;
 
@@ -630,7 +694,15 @@
 	return !!ret;
 }
 
-bool __clk_is_enabled(struct clk *clk)
+bool __clk_is_prepared(struct clk *clk)
+{
+	if (!clk)
+		return false;
+
+	return clk_core_is_prepared(clk->core);
+}
+
+static bool clk_core_is_enabled(struct clk_core *clk)
 {
 	int ret;
 
@@ -650,12 +722,21 @@
 out:
 	return !!ret;
 }
+
+bool __clk_is_enabled(struct clk *clk)
+{
+	if (!clk)
+		return false;
+
+	return clk_core_is_enabled(clk->core);
+}
 EXPORT_SYMBOL_GPL(__clk_is_enabled);
 
-static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
+static struct clk_core *__clk_lookup_subtree(const char *name,
+					     struct clk_core *clk)
 {
-	struct clk *child;
-	struct clk *ret;
+	struct clk_core *child;
+	struct clk_core *ret;
 
 	if (!strcmp(clk->name, name))
 		return clk;
@@ -669,10 +750,10 @@
 	return NULL;
 }
 
-struct clk *__clk_lookup(const char *name)
+static struct clk_core *clk_core_lookup(const char *name)
 {
-	struct clk *root_clk;
-	struct clk *ret;
+	struct clk_core *root_clk;
+	struct clk_core *ret;
 
 	if (!name)
 		return NULL;
@@ -694,42 +775,53 @@
 	return NULL;
 }
 
-/*
- * Helper for finding best parent to provide a given frequency. This can be used
- * directly as a determine_rate callback (e.g. for a mux), or from a more
- * complex clock that may combine a mux with other operations.
- */
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
-			      unsigned long *best_parent_rate,
-			      struct clk_hw **best_parent_p)
+static bool mux_is_better_rate(unsigned long rate, unsigned long now,
+			   unsigned long best, unsigned long flags)
 {
-	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+	if (flags & CLK_MUX_ROUND_CLOSEST)
+		return abs(now - rate) < abs(best - rate);
+
+	return now <= rate && now > best;
+}
+
+static long
+clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
+			     unsigned long min_rate,
+			     unsigned long max_rate,
+			     unsigned long *best_parent_rate,
+			     struct clk_hw **best_parent_p,
+			     unsigned long flags)
+{
+	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
 	int i, num_parents;
 	unsigned long parent_rate, best = 0;
 
 	/* if NO_REPARENT flag set, pass through to current parent */
-	if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
-		parent = clk->parent;
-		if (clk->flags & CLK_SET_RATE_PARENT)
-			best = __clk_round_rate(parent, rate);
+	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
+		parent = core->parent;
+		if (core->flags & CLK_SET_RATE_PARENT)
+			best = __clk_determine_rate(parent ? parent->hw : NULL,
+						    rate, min_rate, max_rate);
 		else if (parent)
-			best = __clk_get_rate(parent);
+			best = clk_core_get_rate_nolock(parent);
 		else
-			best = __clk_get_rate(clk);
+			best = clk_core_get_rate_nolock(core);
 		goto out;
 	}
 
 	/* find the parent that can provide the fastest rate <= rate */
-	num_parents = clk->num_parents;
+	num_parents = core->num_parents;
 	for (i = 0; i < num_parents; i++) {
-		parent = clk_get_parent_by_index(clk, i);
+		parent = clk_core_get_parent_by_index(core, i);
 		if (!parent)
 			continue;
-		if (clk->flags & CLK_SET_RATE_PARENT)
-			parent_rate = __clk_round_rate(parent, rate);
+		if (core->flags & CLK_SET_RATE_PARENT)
+			parent_rate = __clk_determine_rate(parent->hw, rate,
+							   min_rate,
+							   max_rate);
 		else
-			parent_rate = __clk_get_rate(parent);
-		if (parent_rate <= rate && parent_rate > best) {
+			parent_rate = clk_core_get_rate_nolock(parent);
+		if (mux_is_better_rate(rate, parent_rate, best, flags)) {
 			best_parent = parent;
 			best = parent_rate;
 		}
@@ -742,11 +834,63 @@
 
 	return best;
 }
+
+struct clk *__clk_lookup(const char *name)
+{
+	struct clk_core *core = clk_core_lookup(name);
+
+	return !core ? NULL : core->hw->clk;
+}
+
+static void clk_core_get_boundaries(struct clk_core *clk,
+				    unsigned long *min_rate,
+				    unsigned long *max_rate)
+{
+	struct clk *clk_user;
+
+	*min_rate = 0;
+	*max_rate = ULONG_MAX;
+
+	hlist_for_each_entry(clk_user, &clk->clks, child_node)
+		*min_rate = max(*min_rate, clk_user->min_rate);
+
+	hlist_for_each_entry(clk_user, &clk->clks, child_node)
+		*max_rate = min(*max_rate, clk_user->max_rate);
+}
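
As a rough consumer-level illustration of how these per-user boundaries combine (clk_a and clk_b are hypothetical handles to the same hardware clock, each obtained by a different consumer through clk_get(); not part of this series):

#include <linux/clk.h>
#include <linux/kernel.h>

static void my_constrain_example(struct clk *clk_a, struct clk *clk_b)
{
	clk_set_rate_range(clk_a, 100 * 1000 * 1000, ULONG_MAX); /* A: at least 100 MHz */
	clk_set_rate_range(clk_b, 0, 150 * 1000 * 1000);         /* B: at most 150 MHz */
	/* clk_core_get_boundaries() now sees the intersection, 100 MHz to 150 MHz */
}
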
+
+/*
+ * Helper for finding best parent to provide a given frequency. This can be used
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
+ * complex clock that may combine a mux with other operations.
+ */
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+			      unsigned long min_rate,
+			      unsigned long max_rate,
+			      unsigned long *best_parent_rate,
+			      struct clk_hw **best_parent_p)
+{
+	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
+					    best_parent_rate,
+					    best_parent_p, 0);
+}
 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
 
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+			      unsigned long min_rate,
+			      unsigned long max_rate,
+			      unsigned long *best_parent_rate,
+			      struct clk_hw **best_parent_p)
+{
+	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
+					    best_parent_rate,
+					    best_parent_p,
+					    CLK_MUX_ROUND_CLOSEST);
+}
+EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
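
As a hedged sketch of how a provider would hook these helpers up (my_mux_ops is hypothetical, not part of this series), a simple mux only has to plug the generic callback into .determine_rate, which now also receives the aggregated min/max boundaries:

#include <linux/clk-provider.h>

static const struct clk_ops my_mux_ops = {
	/* .get_parent, .set_parent and .recalc_rate elided for brevity */
	.determine_rate	= __clk_mux_determine_rate,
};
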
+
 /***        clk api        ***/
 
-void __clk_unprepare(struct clk *clk)
+static void clk_core_unprepare(struct clk_core *clk)
 {
 	if (!clk)
 		return;
@@ -762,7 +906,7 @@
 	if (clk->ops->unprepare)
 		clk->ops->unprepare(clk->hw);
 
-	__clk_unprepare(clk->parent);
+	clk_core_unprepare(clk->parent);
 }
 
 /**
@@ -782,12 +926,12 @@
 		return;
 
 	clk_prepare_lock();
-	__clk_unprepare(clk);
+	clk_core_unprepare(clk->core);
 	clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
-int __clk_prepare(struct clk *clk)
+static int clk_core_prepare(struct clk_core *clk)
 {
 	int ret = 0;
 
@@ -795,14 +939,14 @@
 		return 0;
 
 	if (clk->prepare_count == 0) {
-		ret = __clk_prepare(clk->parent);
+		ret = clk_core_prepare(clk->parent);
 		if (ret)
 			return ret;
 
 		if (clk->ops->prepare) {
 			ret = clk->ops->prepare(clk->hw);
 			if (ret) {
-				__clk_unprepare(clk->parent);
+				clk_core_unprepare(clk->parent);
 				return ret;
 			}
 		}
@@ -829,15 +973,18 @@
 {
 	int ret;
 
+	if (!clk)
+		return 0;
+
 	clk_prepare_lock();
-	ret = __clk_prepare(clk);
+	ret = clk_core_prepare(clk->core);
 	clk_prepare_unlock();
 
 	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_prepare);
 
-static void __clk_disable(struct clk *clk)
+static void clk_core_disable(struct clk_core *clk)
 {
 	if (!clk)
 		return;
@@ -851,7 +998,15 @@
 	if (clk->ops->disable)
 		clk->ops->disable(clk->hw);
 
-	__clk_disable(clk->parent);
+	clk_core_disable(clk->parent);
+}
+
+static void __clk_disable(struct clk *clk)
+{
+	if (!clk)
+		return;
+
+	clk_core_disable(clk->core);
 }
 
 /**
@@ -879,7 +1034,7 @@
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
-static int __clk_enable(struct clk *clk)
+static int clk_core_enable(struct clk_core *clk)
 {
 	int ret = 0;
 
@@ -890,7 +1045,7 @@
 		return -ESHUTDOWN;
 
 	if (clk->enable_count == 0) {
-		ret = __clk_enable(clk->parent);
+		ret = clk_core_enable(clk->parent);
 
 		if (ret)
 			return ret;
@@ -898,7 +1053,7 @@
 		if (clk->ops->enable) {
 			ret = clk->ops->enable(clk->hw);
 			if (ret) {
-				__clk_disable(clk->parent);
+				clk_core_disable(clk->parent);
 				return ret;
 			}
 		}
@@ -908,6 +1063,14 @@
 	return 0;
 }
 
+static int __clk_enable(struct clk *clk)
+{
+	if (!clk)
+		return 0;
+
+	return clk_core_enable(clk->core);
+}
+
 /**
  * clk_enable - ungate a clock
  * @clk: the clk being ungated
@@ -934,17 +1097,13 @@
 }
 EXPORT_SYMBOL_GPL(clk_enable);
 
-/**
- * __clk_round_rate - round the given rate for a clk
- * @clk: round the rate of this clock
- * @rate: the rate which is to be rounded
- *
- * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
- */
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
+						unsigned long rate,
+						unsigned long min_rate,
+						unsigned long max_rate)
 {
 	unsigned long parent_rate = 0;
-	struct clk *parent;
+	struct clk_core *parent;
 	struct clk_hw *parent_hw;
 
 	if (!clk)
@@ -956,15 +1115,59 @@
 
 	if (clk->ops->determine_rate) {
 		parent_hw = parent ? parent->hw : NULL;
-		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
-						&parent_hw);
+		return clk->ops->determine_rate(clk->hw, rate,
+						min_rate, max_rate,
+						&parent_rate, &parent_hw);
 	} else if (clk->ops->round_rate)
 		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
 	else if (clk->flags & CLK_SET_RATE_PARENT)
-		return __clk_round_rate(clk->parent, rate);
+		return clk_core_round_rate_nolock(clk->parent, rate, min_rate,
+						  max_rate);
 	else
 		return clk->rate;
 }
+
+/**
+ * __clk_determine_rate - get the closest rate actually supported by a clock
+ * @hw: determine the rate of this clock
+ * @rate: target rate
+ * @min_rate: returned rate must not be below this rate
+ * @max_rate: returned rate must not exceed this rate
+ *
+ * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate and
+ * .determine_rate.
+ */
+unsigned long __clk_determine_rate(struct clk_hw *hw,
+				   unsigned long rate,
+				   unsigned long min_rate,
+				   unsigned long max_rate)
+{
+	if (!hw)
+		return 0;
+
+	return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
+}
+EXPORT_SYMBOL_GPL(__clk_determine_rate);
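
A minimal sketch of provider-side use, assuming a hypothetical pass-through clock that simply forwards the request (and the new boundaries) to its parent, much as clk_mux_determine_rate_flags() does above:

#include <linux/clk-provider.h>

static long my_passthrough_determine_rate(struct clk_hw *hw,
					  unsigned long rate,
					  unsigned long min_rate,
					  unsigned long max_rate,
					  unsigned long *best_parent_rate,
					  struct clk_hw **best_parent_p)
{
	/* __clk_get_parent() is lockless, so it is safe under prepare_lock */
	struct clk_hw *parent = __clk_get_hw(__clk_get_parent(hw->clk));

	if (!parent)
		return -EINVAL;

	*best_parent_p = parent;
	*best_parent_rate = __clk_determine_rate(parent, rate,
						 min_rate, max_rate);

	return *best_parent_rate;
}
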
+
+/**
+ * __clk_round_rate - round the given rate for a clk
+ * @clk: round the rate of this clock
+ * @rate: the rate which is to be rounded
+ *
+ * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
+ */
+unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long min_rate;
+	unsigned long max_rate;
+
+	if (!clk)
+		return 0;
+
+	clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
+
+	return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
+}
 EXPORT_SYMBOL_GPL(__clk_round_rate);
 
 /**
@@ -980,6 +1183,9 @@
 {
 	unsigned long ret;
 
+	if (!clk)
+		return 0;
+
 	clk_prepare_lock();
 	ret = __clk_round_rate(clk, rate);
 	clk_prepare_unlock();
@@ -1002,22 +1208,21 @@
  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
  * a driver returns that.
  */
-static int __clk_notify(struct clk *clk, unsigned long msg,
+static int __clk_notify(struct clk_core *clk, unsigned long msg,
 		unsigned long old_rate, unsigned long new_rate)
 {
 	struct clk_notifier *cn;
 	struct clk_notifier_data cnd;
 	int ret = NOTIFY_DONE;
 
-	cnd.clk = clk;
 	cnd.old_rate = old_rate;
 	cnd.new_rate = new_rate;
 
 	list_for_each_entry(cn, &clk_notifier_list, node) {
-		if (cn->clk == clk) {
+		if (cn->clk->core == clk) {
+			cnd.clk = cn->clk;
 			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
 					&cnd);
-			break;
 		}
 	}
 
@@ -1035,10 +1240,10 @@
  *
  * Caller must hold prepare_lock.
  */
-static void __clk_recalc_accuracies(struct clk *clk)
+static void __clk_recalc_accuracies(struct clk_core *clk)
 {
 	unsigned long parent_accuracy = 0;
-	struct clk *child;
+	struct clk_core *child;
 
 	if (clk->parent)
 		parent_accuracy = clk->parent->accuracy;
@@ -1053,16 +1258,7 @@
 		__clk_recalc_accuracies(child);
 }
 
-/**
- * clk_get_accuracy - return the accuracy of clk
- * @clk: the clk whose accuracy is being returned
- *
- * Simply returns the cached accuracy of the clk, unless
- * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
- * issued.
- * If clk is NULL then returns 0.
- */
-long clk_get_accuracy(struct clk *clk)
+static long clk_core_get_accuracy(struct clk_core *clk)
 {
 	unsigned long accuracy;
 
@@ -1075,9 +1271,27 @@
 
 	return accuracy;
 }
+
+/**
+ * clk_get_accuracy - return the accuracy of clk
+ * @clk: the clk whose accuracy is being returned
+ *
+ * Simply returns the cached accuracy of the clk, unless
+ * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
+ * issued.
+ * If clk is NULL then returns 0.
+ */
+long clk_get_accuracy(struct clk *clk)
+{
+	if (!clk)
+		return 0;
+
+	return clk_core_get_accuracy(clk->core);
+}
 EXPORT_SYMBOL_GPL(clk_get_accuracy);
 
-static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
+static unsigned long clk_recalc(struct clk_core *clk,
+				unsigned long parent_rate)
 {
 	if (clk->ops->recalc_rate)
 		return clk->ops->recalc_rate(clk->hw, parent_rate);
@@ -1098,11 +1312,11 @@
  *
  * Caller must hold prepare_lock.
  */
-static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
 {
 	unsigned long old_rate;
 	unsigned long parent_rate = 0;
-	struct clk *child;
+	struct clk_core *child;
 
 	old_rate = clk->rate;
 
@@ -1122,6 +1336,21 @@
 		__clk_recalc_rates(child, msg);
 }
 
+static unsigned long clk_core_get_rate(struct clk_core *clk)
+{
+	unsigned long rate;
+
+	clk_prepare_lock();
+
+	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
+		__clk_recalc_rates(clk, 0);
+
+	rate = clk_core_get_rate_nolock(clk);
+	clk_prepare_unlock();
+
+	return rate;
+}
+
 /**
  * clk_get_rate - return the rate of clk
  * @clk: the clk whose rate is being returned
@@ -1132,21 +1361,15 @@
  */
 unsigned long clk_get_rate(struct clk *clk)
 {
-	unsigned long rate;
+	if (!clk)
+		return 0;
 
-	clk_prepare_lock();
-
-	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
-		__clk_recalc_rates(clk, 0);
-
-	rate = __clk_get_rate(clk);
-	clk_prepare_unlock();
-
-	return rate;
+	return clk_core_get_rate(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_get_rate);
 
-static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+static int clk_fetch_parent_index(struct clk_core *clk,
+				  struct clk_core *parent)
 {
 	int i;
 
@@ -1160,7 +1383,7 @@
 	/*
 	 * find index of new parent clock using cached parent ptrs,
 	 * or if not yet cached, use string name comparison and cache
-	 * them now to avoid future calls to __clk_lookup.
+	 * them now to avoid future calls to clk_core_lookup.
 	 */
 	for (i = 0; i < clk->num_parents; i++) {
 		if (clk->parents[i] == parent)
@@ -1170,7 +1393,7 @@
 			continue;
 
 		if (!strcmp(clk->parent_names[i], parent->name)) {
-			clk->parents[i] = __clk_lookup(parent->name);
+			clk->parents[i] = clk_core_lookup(parent->name);
 			return i;
 		}
 	}
@@ -1178,7 +1401,7 @@
 	return -EINVAL;
 }
 
-static void clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent)
 {
 	hlist_del(&clk->child_node);
 
@@ -1195,10 +1418,11 @@
 	clk->parent = new_parent;
 }
 
-static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
+static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
+					   struct clk_core *parent)
 {
 	unsigned long flags;
-	struct clk *old_parent = clk->parent;
+	struct clk_core *old_parent = clk->parent;
 
 	/*
 	 * Migrate prepare state between parents and prevent race with
@@ -1218,9 +1442,9 @@
 	 * See also: Comment for clk_set_parent() below.
 	 */
 	if (clk->prepare_count) {
-		__clk_prepare(parent);
-		clk_enable(parent);
-		clk_enable(clk);
+		clk_core_prepare(parent);
+		clk_core_enable(parent);
+		clk_core_enable(clk);
 	}
 
 	/* update the clk tree topology */
@@ -1231,25 +1455,27 @@
 	return old_parent;
 }
 
-static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
-		struct clk *old_parent)
+static void __clk_set_parent_after(struct clk_core *core,
+				   struct clk_core *parent,
+				   struct clk_core *old_parent)
 {
 	/*
 	 * Finish the migration of prepare state and undo the changes done
 	 * for preventing a race with clk_enable().
 	 */
-	if (clk->prepare_count) {
-		clk_disable(clk);
-		clk_disable(old_parent);
-		__clk_unprepare(old_parent);
+	if (core->prepare_count) {
+		clk_core_disable(core);
+		clk_core_disable(old_parent);
+		clk_core_unprepare(old_parent);
 	}
 }
 
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
+			    u8 p_index)
 {
 	unsigned long flags;
 	int ret = 0;
-	struct clk *old_parent;
+	struct clk_core *old_parent;
 
 	old_parent = __clk_set_parent_before(clk, parent);
 
@@ -1263,9 +1489,9 @@
 		clk_enable_unlock(flags);
 
 		if (clk->prepare_count) {
-			clk_disable(clk);
-			clk_disable(parent);
-			__clk_unprepare(parent);
+			clk_core_disable(clk);
+			clk_core_disable(parent);
+			clk_core_unprepare(parent);
 		}
 		return ret;
 	}
@@ -1291,9 +1517,10 @@
  *
  * Caller must hold prepare_lock.
  */
-static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
+static int __clk_speculate_rates(struct clk_core *clk,
+				 unsigned long parent_rate)
 {
-	struct clk *child;
+	struct clk_core *child;
 	unsigned long new_rate;
 	int ret = NOTIFY_DONE;
 
@@ -1319,10 +1546,10 @@
 	return ret;
 }
 
-static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
-			     struct clk *new_parent, u8 p_index)
+static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate,
+			     struct clk_core *new_parent, u8 p_index)
 {
-	struct clk *child;
+	struct clk_core *child;
 
 	clk->new_rate = new_rate;
 	clk->new_parent = new_parent;
@@ -1342,13 +1569,16 @@
  * calculate the new rates returning the topmost clock that has to be
  * changed.
  */
-static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
+static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
+					   unsigned long rate)
 {
-	struct clk *top = clk;
-	struct clk *old_parent, *parent;
+	struct clk_core *top = clk;
+	struct clk_core *old_parent, *parent;
 	struct clk_hw *parent_hw;
 	unsigned long best_parent_rate = 0;
 	unsigned long new_rate;
+	unsigned long min_rate;
+	unsigned long max_rate;
 	int p_index = 0;
 
 	/* sanity */
@@ -1360,16 +1590,22 @@
 	if (parent)
 		best_parent_rate = parent->rate;
 
+	clk_core_get_boundaries(clk, &min_rate, &max_rate);
+
 	/* find the closest rate and parent clk/rate */
 	if (clk->ops->determine_rate) {
 		parent_hw = parent ? parent->hw : NULL;
 		new_rate = clk->ops->determine_rate(clk->hw, rate,
+						    min_rate,
+						    max_rate,
 						    &best_parent_rate,
 						    &parent_hw);
-		parent = parent_hw ? parent_hw->clk : NULL;
+		parent = parent_hw ? parent_hw->core : NULL;
 	} else if (clk->ops->round_rate) {
 		new_rate = clk->ops->round_rate(clk->hw, rate,
 						&best_parent_rate);
+		if (new_rate < min_rate || new_rate > max_rate)
+			return NULL;
 	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
 		/* pass-through clock without adjustable parent */
 		clk->new_rate = clk->rate;
@@ -1390,7 +1626,7 @@
 	}
 
 	/* try finding the new parent index */
-	if (parent) {
+	if (parent && clk->num_parents > 1) {
 		p_index = clk_fetch_parent_index(clk, parent);
 		if (p_index < 0) {
 			pr_debug("%s: clk %s can not be parent of clk %s\n",
@@ -1414,9 +1650,10 @@
  * so that in case of an error we can walk down the whole tree again and
  * abort the change.
  */
-static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
+static struct clk_core *clk_propagate_rate_change(struct clk_core *clk,
+						  unsigned long event)
 {
-	struct clk *child, *tmp_clk, *fail_clk = NULL;
+	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
 	int ret = NOTIFY_DONE;
 
 	if (clk->rate == clk->new_rate)
@@ -1451,14 +1688,14 @@
  * walk down a subtree and set the new rates notifying the rate
  * change on the way
  */
-static void clk_change_rate(struct clk *clk)
+static void clk_change_rate(struct clk_core *clk)
 {
-	struct clk *child;
+	struct clk_core *child;
 	struct hlist_node *tmp;
 	unsigned long old_rate;
 	unsigned long best_parent_rate = 0;
 	bool skip_set_rate = false;
-	struct clk *old_parent;
+	struct clk_core *old_parent;
 
 	old_rate = clk->rate;
 
@@ -1506,6 +1743,45 @@
 		clk_change_rate(clk->new_child);
 }
 
+static int clk_core_set_rate_nolock(struct clk_core *clk,
+				    unsigned long req_rate)
+{
+	struct clk_core *top, *fail_clk;
+	unsigned long rate = req_rate;
+	int ret = 0;
+
+	if (!clk)
+		return 0;
+
+	/* bail early if nothing to do */
+	if (rate == clk_core_get_rate_nolock(clk))
+		return 0;
+
+	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count)
+		return -EBUSY;
+
+	/* calculate new rates and get the topmost changed clock */
+	top = clk_calc_new_rates(clk, rate);
+	if (!top)
+		return -EINVAL;
+
+	/* notify that we are about to change rates */
+	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
+	if (fail_clk) {
+		pr_debug("%s: failed to set %s rate\n", __func__,
+				fail_clk->name);
+		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+		return -EBUSY;
+	}
+
+	/* change the rates */
+	clk_change_rate(top);
+
+	clk->req_rate = req_rate;
+
+	return ret;
+}
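
For reference, a minimal consumer-side sketch of the notifier hand-shake used above (the 200 MHz threshold and the my_* names are made up); returning NOTIFY_BAD on PRE_RATE_CHANGE is what makes clk_core_set_rate_nolock() issue ABORT_RATE_CHANGE and fail with -EBUSY:

#include <linux/clk.h>
#include <linux/notifier.h>

static int my_rate_notifier(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	struct clk_notifier_data *ndata = data;

	/* veto any attempt to run faster than 200 MHz */
	if (event == PRE_RATE_CHANGE && ndata->new_rate > 200 * 1000 * 1000)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block my_clk_nb = {
	.notifier_call = my_rate_notifier,
};

/* at probe time: clk_notifier_register(clk, &my_clk_nb); */
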
+
 /**
  * clk_set_rate - specify a new rate for clk
  * @clk: the clk whose rate is being changed
@@ -1529,8 +1805,7 @@
  */
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-	struct clk *top, *fail_clk;
-	int ret = 0;
+	int ret;
 
 	if (!clk)
 		return 0;
@@ -1538,36 +1813,8 @@
 	/* prevent racing with updates to the clock topology */
 	clk_prepare_lock();
 
-	/* bail early if nothing to do */
-	if (rate == clk_get_rate(clk))
-		goto out;
+	ret = clk_core_set_rate_nolock(clk->core, rate);
 
-	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	/* calculate new rates and get the topmost changed clock */
-	top = clk_calc_new_rates(clk, rate);
-	if (!top) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* notify that we are about to change rates */
-	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
-	if (fail_clk) {
-		pr_debug("%s: failed to set %s rate\n", __func__,
-				fail_clk->name);
-		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-		ret = -EBUSY;
-		goto out;
-	}
-
-	/* change the rates */
-	clk_change_rate(top);
-
-out:
 	clk_prepare_unlock();
 
 	return ret;
@@ -1575,6 +1822,74 @@
 EXPORT_SYMBOL_GPL(clk_set_rate);
 
 /**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+	int ret = 0;
+
+	if (!clk)
+		return 0;
+
+	if (min > max) {
+		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
+		       __func__, clk->core->name, clk->dev_id, clk->con_id,
+		       min, max);
+		return -EINVAL;
+	}
+
+	clk_prepare_lock();
+
+	if (min != clk->min_rate || max != clk->max_rate) {
+		clk->min_rate = min;
+		clk->max_rate = max;
+		ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+	}
+
+	clk_prepare_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate_range);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+	if (!clk)
+		return 0;
+
+	return clk_set_rate_range(clk, rate, clk->max_rate);
+}
+EXPORT_SYMBOL_GPL(clk_set_min_rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	if (!clk)
+		return 0;
+
+	return clk_set_rate_range(clk, clk->min_rate, rate);
+}
+EXPORT_SYMBOL_GPL(clk_set_max_rate);
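
A hedged consumer sketch of the new range API from a hypothetical probe path ("my-clk" and the frequencies are placeholders):

#include <linux/clk.h>
#include <linux/device.h>

static int my_setup_clk(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "my-clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* this user tolerates anything between 100 MHz and 200 MHz... */
	ret = clk_set_rate_range(clk, 100 * 1000 * 1000, 200 * 1000 * 1000);
	if (ret)
		return ret;

	/* ...and would prefer 150 MHz inside that window */
	return clk_set_rate(clk, 150 * 1000 * 1000);
}
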
+
+/**
  * clk_get_parent - return the parent of a clk
  * @clk: the clk whose parent gets returned
  *
@@ -1599,11 +1914,11 @@
  *
  * For single-parent clocks without .get_parent, first check to see if the
  * .parents array exists, and if so use it to avoid an expensive tree
- * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
+ * traversal.  If .parents does not exist then walk the tree.
  */
-static struct clk *__clk_init_parent(struct clk *clk)
+static struct clk_core *__clk_init_parent(struct clk_core *clk)
 {
-	struct clk *ret = NULL;
+	struct clk_core *ret = NULL;
 	u8 index;
 
 	/* handle the trivial cases */
@@ -1613,7 +1928,7 @@
 
 	if (clk->num_parents == 1) {
 		if (IS_ERR_OR_NULL(clk->parent))
-			clk->parent = __clk_lookup(clk->parent_names[0]);
+			clk->parent = clk_core_lookup(clk->parent_names[0]);
 		ret = clk->parent;
 		goto out;
 	}
@@ -1627,8 +1942,8 @@
 
 	/*
 	 * Do our best to cache parent clocks in clk->parents.  This prevents
-	 * unnecessary and expensive calls to __clk_lookup.  We don't set
-	 * clk->parent here; that is done by the calling function
+	 * unnecessary and expensive lookups.  We don't set clk->parent here;
+	 * that is done by the calling function.
 	 */
 
 	index = clk->ops->get_parent(clk->hw);
@@ -1638,13 +1953,14 @@
 			kcalloc(clk->num_parents, sizeof(struct clk *),
 					GFP_KERNEL);
 
-	ret = clk_get_parent_by_index(clk, index);
+	ret = clk_core_get_parent_by_index(clk, index);
 
 out:
 	return ret;
 }
 
-void __clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_core_reparent(struct clk_core *clk,
+				  struct clk_core *new_parent)
 {
 	clk_reparent(clk, new_parent);
 	__clk_recalc_accuracies(clk);
@@ -1652,23 +1968,40 @@
 }
 
 /**
- * clk_set_parent - switch the parent of a mux clk
- * @clk: the mux clk whose input we are switching
- * @parent: the new input to clk
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
  *
- * Re-parent clk to use parent as its new input source.  If clk is in
- * prepared state, the clk will get enabled for the duration of this call. If
- * that's not acceptable for a specific clk (Eg: the consumer can't handle
- * that, the reparenting is glitchy in hardware, etc), use the
- * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
  *
- * After successfully changing clk's parent clk_set_parent will update the
- * clk topology, sysfs topology and propagate rate recalculation via
- * __clk_recalc_rates.
- *
- * Returns 0 on success, -EERROR otherwise.
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
  */
-int clk_set_parent(struct clk *clk, struct clk *parent)
+bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+	struct clk_core *core, *parent_core;
+	unsigned int i;
+
+	/* NULL clocks should be nops, so return success if either is NULL. */
+	if (!clk || !parent)
+		return true;
+
+	core = clk->core;
+	parent_core = parent->core;
+
+	/* Optimize for the case where the parent is already the parent. */
+	if (core->parent == parent_core)
+		return true;
+
+	for (i = 0; i < core->num_parents; i++)
+		if (strcmp(core->parent_names[i], parent_core->name) == 0)
+			return true;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(clk_has_parent);
+
+static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
 {
 	int ret = 0;
 	int p_index = 0;
@@ -1728,6 +2061,31 @@
 
 	return ret;
 }
+
+/**
+ * clk_set_parent - switch the parent of a mux clk
+ * @clk: the mux clk whose input we are switching
+ * @parent: the new input to clk
+ *
+ * Re-parent clk to use parent as its new input source.  If clk is in
+ * prepared state, the clk will get enabled for the duration of this call. If
+ * that's not acceptable for a specific clk (Eg: the consumer can't handle
+ * that, the reparenting is glitchy in hardware, etc), use the
+ * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
+ *
+ * After successfully changing clk's parent clk_set_parent will update the
+ * clk topology, sysfs topology and propagate rate recalculation via
+ * __clk_recalc_rates.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+	if (!clk)
+		return 0;
+
+	return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
+}
 EXPORT_SYMBOL_GPL(clk_set_parent);
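
A small consumer sketch combining the new clk_has_parent() check with clk_set_parent(); both handles are assumed to come from clk_get() in a hypothetical driver:

#include <linux/clk.h>

static int my_try_reparent(struct clk *clk, struct clk *fast_parent)
{
	/* only ask for the switch if the hardware can actually do it */
	if (!clk_has_parent(clk, fast_parent))
		return -EINVAL;

	return clk_set_parent(clk, fast_parent);
}
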
 
 /**
@@ -1764,13 +2122,13 @@
 
 	clk_prepare_lock();
 
-	if (!clk->ops->set_phase)
+	if (!clk->core->ops->set_phase)
 		goto out_unlock;
 
-	ret = clk->ops->set_phase(clk->hw, degrees);
+	ret = clk->core->ops->set_phase(clk->core->hw, degrees);
 
 	if (!ret)
-		clk->phase = degrees;
+		clk->core->phase = degrees;
 
 out_unlock:
 	clk_prepare_unlock();
@@ -1778,15 +2136,9 @@
 out:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(clk_set_phase);
 
-/**
- * clk_get_phase - return the phase shift of a clock signal
- * @clk: clock signal source
- *
- * Returns the phase shift of a clock node in degrees, otherwise returns
- * -EERROR.
- */
-int clk_get_phase(struct clk *clk)
+static int clk_core_get_phase(struct clk_core *clk)
 {
 	int ret = 0;
 
@@ -1800,28 +2152,74 @@
 out:
 	return ret;
 }
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees, otherwise returns
+ * a negative error code.
+ */
+int clk_get_phase(struct clk *clk)
+{
+	if (!clk)
+		return 0;
+
+	return clk_core_get_phase(clk->core);
+}
+EXPORT_SYMBOL_GPL(clk_get_phase);
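
And a hypothetical consumer sketch of the phase API (the 90 degree shift is arbitrary):

#include <linux/clk.h>

static int my_set_sample_phase(struct clk *sample_clk)
{
	int ret = clk_set_phase(sample_clk, 90);

	if (!ret && clk_get_phase(sample_clk) != 90)
		pr_warn("phase readback mismatch\n");

	return ret;
}
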
+
+/**
+ * clk_is_match - check if two clk's point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+	/* trivial case: identical struct clk's or both NULL */
+	if (p == q)
+		return true;
+
+	/* true if clk->core pointers match. Avoid derefing garbage */
+	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
+		if (p->core == q->core)
+			return true;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(clk_is_match);
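
Since every clk_get() now hands back a distinct struct clk, pointer comparison of handles no longer works; a hedged sketch of the replacement idiom (the "bus" and "axi" con_ids are made up):

#include <linux/clk.h>
#include <linux/device.h>

static bool my_clks_share_hw(struct device *dev)
{
	struct clk *a = devm_clk_get(dev, "bus");
	struct clk *b = devm_clk_get(dev, "axi");

	if (IS_ERR(a) || IS_ERR(b))
		return false;

	/* 'a == b' is always false for two gets; compare the clk_core instead */
	return clk_is_match(a, b);
}
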
 
 /**
  * __clk_init - initialize the data structures in a struct clk
  * @dev:	device initializing this clk, placeholder for now
  * @clk:	clk being initialized
  *
- * Initializes the lists in struct clk, queries the hardware for the
+ * Initializes the lists in struct clk_core, queries the hardware for the
  * parent and rate and sets them both.
  */
-int __clk_init(struct device *dev, struct clk *clk)
+static int __clk_init(struct device *dev, struct clk *clk_user)
 {
 	int i, ret = 0;
-	struct clk *orphan;
+	struct clk_core *orphan;
 	struct hlist_node *tmp2;
+	struct clk_core *clk;
+	unsigned long rate;
 
-	if (!clk)
+	if (!clk_user)
 		return -EINVAL;
 
+	clk = clk_user->core;
+
 	clk_prepare_lock();
 
 	/* check to see if a clock with this name is already registered */
-	if (__clk_lookup(clk->name)) {
+	if (clk_core_lookup(clk->name)) {
 		pr_debug("%s: clk %s already initialized\n",
 				__func__, clk->name);
 		ret = -EEXIST;
@@ -1873,7 +2271,7 @@
 		clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
 					GFP_KERNEL);
 		/*
-		 * __clk_lookup returns NULL for parents that have not been
+		 * clk_core_lookup returns NULL for parents that have not been
 		 * clk_init'd; thus any access to clk->parents[] must check
 		 * for a NULL pointer.  We can always perform lazy lookups for
 		 * missing parents later on.
@@ -1881,7 +2279,7 @@
 		if (clk->parents)
 			for (i = 0; i < clk->num_parents; i++)
 				clk->parents[i] =
-					__clk_lookup(clk->parent_names[i]);
+					clk_core_lookup(clk->parent_names[i]);
 	}
 
 	clk->parent = __clk_init_parent(clk);
@@ -1936,12 +2334,13 @@
 	 * then rate is set to zero.
 	 */
 	if (clk->ops->recalc_rate)
-		clk->rate = clk->ops->recalc_rate(clk->hw,
-				__clk_get_rate(clk->parent));
+		rate = clk->ops->recalc_rate(clk->hw,
+				clk_core_get_rate_nolock(clk->parent));
 	else if (clk->parent)
-		clk->rate = clk->parent->rate;
+		rate = clk->parent->rate;
 	else
-		clk->rate = 0;
+		rate = 0;
+	clk->rate = clk->req_rate = rate;
 
 	/*
 	 * walk the list of orphan clocks and reparent any that are children of
@@ -1951,13 +2350,13 @@
 		if (orphan->num_parents && orphan->ops->get_parent) {
 			i = orphan->ops->get_parent(orphan->hw);
 			if (!strcmp(clk->name, orphan->parent_names[i]))
-				__clk_reparent(orphan, clk);
+				clk_core_reparent(orphan, clk);
 			continue;
 		}
 
 		for (i = 0; i < orphan->num_parents; i++)
 			if (!strcmp(clk->name, orphan->parent_names[i])) {
-				__clk_reparent(orphan, clk);
+				clk_core_reparent(orphan, clk);
 				break;
 			}
 	 }
@@ -1983,47 +2382,39 @@
 	return ret;
 }
 
-/**
- * __clk_register - register a clock and return a cookie.
- *
- * Same as clk_register, except that the .clk field inside hw shall point to a
- * preallocated (generally statically allocated) struct clk. None of the fields
- * of the struct clk need to be initialized.
- *
- * The data pointed to by .init and .clk field shall NOT be marked as init
- * data.
- *
- * __clk_register is only exposed via clk-private.h and is intended for use with
- * very large numbers of clocks that need to be statically initialized.  It is
- * a layering violation to include clk-private.h from any code which implements
- * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements its operations.  Returns 0
- * on success, otherwise an error code.
- */
-struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+			     const char *con_id)
 {
-	int ret;
 	struct clk *clk;
 
-	clk = hw->clk;
-	clk->name = hw->init->name;
-	clk->ops = hw->init->ops;
-	clk->hw = hw;
-	clk->flags = hw->init->flags;
-	clk->parent_names = hw->init->parent_names;
-	clk->num_parents = hw->init->num_parents;
-	if (dev && dev->driver)
-		clk->owner = dev->driver->owner;
-	else
-		clk->owner = NULL;
+	/* This is to allow this function to be chained to others */
+	if (!hw || IS_ERR(hw))
+		return (struct clk *) hw;
 
-	ret = __clk_init(dev, clk);
-	if (ret)
-		return ERR_PTR(ret);
+	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+	if (!clk)
+		return ERR_PTR(-ENOMEM);
+
+	clk->core = hw->core;
+	clk->dev_id = dev_id;
+	clk->con_id = con_id;
+	clk->max_rate = ULONG_MAX;
+
+	clk_prepare_lock();
+	hlist_add_head(&clk->child_node, &hw->core->clks);
+	clk_prepare_unlock();
 
 	return clk;
 }
-EXPORT_SYMBOL_GPL(__clk_register);
+
+void __clk_free_clk(struct clk *clk)
+{
+	clk_prepare_lock();
+	hlist_del(&clk->child_node);
+	clk_prepare_unlock();
+
+	kfree(clk);
+}
 
 /**
  * clk_register - allocate a new clock, register it and return an opaque cookie
@@ -2039,7 +2430,7 @@
 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
 	int i, ret;
-	struct clk *clk;
+	struct clk_core *clk;
 
 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
 	if (!clk) {
@@ -2060,7 +2451,7 @@
 	clk->hw = hw;
 	clk->flags = hw->init->flags;
 	clk->num_parents = hw->init->num_parents;
-	hw->clk = clk;
+	hw->core = clk;
 
 	/* allocate local copy in case parent_names is __initdata */
 	clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
@@ -2084,9 +2475,21 @@
 		}
 	}
 
-	ret = __clk_init(dev, clk);
+	INIT_HLIST_HEAD(&clk->clks);
+
+	hw->clk = __clk_create_clk(hw, NULL, NULL);
+	if (IS_ERR(hw->clk)) {
+		pr_err("%s: could not allocate per-user clk\n", __func__);
+		ret = PTR_ERR(hw->clk);
+		goto fail_parent_names_copy;
+	}
+
+	ret = __clk_init(dev, hw->clk);
 	if (!ret)
-		return clk;
+		return hw->clk;
+
+	__clk_free_clk(hw->clk);
+	hw->clk = NULL;
 
 fail_parent_names_copy:
 	while (--i >= 0)
@@ -2107,7 +2510,7 @@
  */
 static void __clk_release(struct kref *ref)
 {
-	struct clk *clk = container_of(ref, struct clk, ref);
+	struct clk_core *clk = container_of(ref, struct clk_core, ref);
 	int i = clk->num_parents;
 
 	kfree(clk->parents);
@@ -2165,12 +2568,13 @@
 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
 		return;
 
-	clk_debug_unregister(clk);
+	clk_debug_unregister(clk->core);
 
 	clk_prepare_lock();
 
-	if (clk->ops == &clk_nodrv_ops) {
-		pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
+	if (clk->core->ops == &clk_nodrv_ops) {
+		pr_err("%s: unregistered clock: %s\n", __func__,
+		       clk->core->name);
 		return;
 	}
 	/*
@@ -2178,24 +2582,25 @@
 	 * a reference to this clock.
 	 */
 	flags = clk_enable_lock();
-	clk->ops = &clk_nodrv_ops;
+	clk->core->ops = &clk_nodrv_ops;
 	clk_enable_unlock(flags);
 
-	if (!hlist_empty(&clk->children)) {
-		struct clk *child;
+	if (!hlist_empty(&clk->core->children)) {
+		struct clk_core *child;
 		struct hlist_node *t;
 
 		/* Reparent all children to the orphan list. */
-		hlist_for_each_entry_safe(child, t, &clk->children, child_node)
-			clk_set_parent(child, NULL);
+		hlist_for_each_entry_safe(child, t, &clk->core->children,
+					  child_node)
+			clk_core_set_parent(child, NULL);
 	}
 
-	hlist_del_init(&clk->child_node);
+	hlist_del_init(&clk->core->child_node);
 
-	if (clk->prepare_count)
+	if (clk->core->prepare_count)
 		pr_warn("%s: unregistering prepared clock: %s\n",
-					__func__, clk->name);
-	kref_put(&clk->ref, __clk_release);
+					__func__, clk->core->name);
+	kref_put(&clk->core->ref, __clk_release);
 
 	clk_prepare_unlock();
 }
@@ -2263,11 +2668,13 @@
  */
 int __clk_get(struct clk *clk)
 {
-	if (clk) {
-		if (!try_module_get(clk->owner))
+	struct clk_core *core = !clk ? NULL : clk->core;
+
+	if (core) {
+		if (!try_module_get(core->owner))
 			return 0;
 
-		kref_get(&clk->ref);
+		kref_get(&core->ref);
 	}
 	return 1;
 }
@@ -2280,11 +2687,20 @@
 		return;
 
 	clk_prepare_lock();
-	owner = clk->owner;
-	kref_put(&clk->ref, __clk_release);
+
+	hlist_del(&clk->child_node);
+	if (clk->min_rate > clk->core->req_rate ||
+	    clk->max_rate < clk->core->req_rate)
+		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+
+	owner = clk->core->owner;
+	kref_put(&clk->core->ref, __clk_release);
+
 	clk_prepare_unlock();
 
 	module_put(owner);
+
+	kfree(clk);
 }
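
In consumer terms, the effect of the reworked __clk_put() is that a user's rate constraints die with its handle; a hypothetical sketch ("core" is a made-up con_id):

#include <linux/clk.h>
#include <linux/device.h>

static void my_burst(struct device *dev)
{
	struct clk *clk = clk_get(dev, "core");

	if (IS_ERR(clk))
		return;

	clk_set_min_rate(clk, 200 * 1000 * 1000);	/* hold at least 200 MHz */
	/* ... latency critical work ... */
	clk_put(clk);	/* drops this user's floor and re-evaluates the rate */
}
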
 
 /***        clk rate change notifiers        ***/
@@ -2339,7 +2755,7 @@
 
 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
 
-	clk->notifier_count++;
+	clk->core->notifier_count++;
 
 out:
 	clk_prepare_unlock();
@@ -2376,7 +2792,7 @@
 	if (cn->clk == clk) {
 		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
 
-		clk->notifier_count--;
+		clk->core->notifier_count--;
 
 		/* XXX the notifier code should handle this better */
 		if (!cn->notifier_head.head) {
@@ -2506,7 +2922,8 @@
 }
 EXPORT_SYMBOL_GPL(of_clk_del_provider);
 
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+				       const char *dev_id, const char *con_id)
 {
 	struct of_clk_provider *provider;
 	struct clk *clk = ERR_PTR(-EPROBE_DEFER);
@@ -2515,8 +2932,17 @@
 	list_for_each_entry(provider, &of_clk_providers, link) {
 		if (provider->node == clkspec->np)
 			clk = provider->get(clkspec, provider->data);
-		if (!IS_ERR(clk))
+		if (!IS_ERR(clk)) {
+			clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
+					       con_id);
+
+			if (!IS_ERR(clk) && !__clk_get(clk)) {
+				__clk_free_clk(clk);
+				clk = ERR_PTR(-ENOENT);
+			}
+
 			break;
+		}
 	}
 
 	return clk;
@@ -2527,7 +2953,7 @@
 	struct clk *clk;
 
 	mutex_lock(&of_clk_mutex);
-	clk = __of_clk_get_from_provider(clkspec);
+	clk = __of_clk_get_from_provider(clkspec, NULL, __func__);
 	mutex_unlock(&of_clk_mutex);
 
 	return clk;
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index c798138..ba84540 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -9,9 +9,31 @@
  * published by the Free Software Foundation.
  */
 
+struct clk_hw;
+
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec);
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec);
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+				       const char *dev_id, const char *con_id);
 void of_clk_lock(void);
 void of_clk_unlock(void);
 #endif
+
+#ifdef CONFIG_COMMON_CLK
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+			     const char *con_id);
+void __clk_free_clk(struct clk *clk);
+#else
+/* All these casts to avoid ifdefs in clkdev... */
+static inline struct clk *
+__clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id)
+{
+	return (struct clk *)hw;
+}
+static inline void __clk_free_clk(struct clk *clk) { }
+static inline struct clk_hw *__clk_get_hw(struct clk *clk)
+{
+	return (struct clk_hw *)clk;
+}
+
+#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index da4bda8..043fd36 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -19,6 +19,7 @@
 #include <linux/mutex.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/clk-provider.h>
 #include <linux/of.h>
 
 #include "clk.h"
@@ -28,6 +29,20 @@
 
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 
+static struct clk *__of_clk_get_by_clkspec(struct of_phandle_args *clkspec,
+					 const char *dev_id, const char *con_id)
+{
+	struct clk *clk;
+
+	if (!clkspec)
+		return ERR_PTR(-EINVAL);
+
+	of_clk_lock();
+	clk = __of_clk_get_from_provider(clkspec, dev_id, con_id);
+	of_clk_unlock();
+	return clk;
+}
+
 /**
  * of_clk_get_by_clkspec() - Look up a clock from a clock provider
  * @clkspec: pointer to a clock specifier data structure
@@ -38,22 +53,11 @@
  */
 struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec)
 {
-	struct clk *clk;
-
-	if (!clkspec)
-		return ERR_PTR(-EINVAL);
-
-	of_clk_lock();
-	clk = __of_clk_get_from_provider(clkspec);
-
-	if (!IS_ERR(clk) && !__clk_get(clk))
-		clk = ERR_PTR(-ENOENT);
-
-	of_clk_unlock();
-	return clk;
+	return __of_clk_get_by_clkspec(clkspec, NULL, __func__);
 }
 
-struct clk *of_clk_get(struct device_node *np, int index)
+static struct clk *__of_clk_get(struct device_node *np, int index,
+			       const char *dev_id, const char *con_id)
 {
 	struct of_phandle_args clkspec;
 	struct clk *clk;
@@ -67,22 +71,21 @@
 	if (rc)
 		return ERR_PTR(rc);
 
-	clk = of_clk_get_by_clkspec(&clkspec);
+	clk = __of_clk_get_by_clkspec(&clkspec, dev_id, con_id);
 	of_node_put(clkspec.np);
+
 	return clk;
 }
+
+struct clk *of_clk_get(struct device_node *np, int index)
+{
+	return __of_clk_get(np, index, np->full_name, NULL);
+}
 EXPORT_SYMBOL(of_clk_get);
 
-/**
- * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
- * @np: pointer to clock consumer node
- * @name: name of consumer's clock input, or NULL for the first clock reference
- *
- * This function parses the clocks and clock-names properties,
- * and uses them to look up the struct clk from the registered list of clock
- * providers.
- */
-struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+static struct clk *__of_clk_get_by_name(struct device_node *np,
+					const char *dev_id,
+					const char *name)
 {
 	struct clk *clk = ERR_PTR(-ENOENT);
 
@@ -97,10 +100,10 @@
 		 */
 		if (name)
 			index = of_property_match_string(np, "clock-names", name);
-		clk = of_clk_get(np, index);
-		if (!IS_ERR(clk))
+		clk = __of_clk_get(np, index, dev_id, name);
+		if (!IS_ERR(clk)) {
 			break;
-		else if (name && index >= 0) {
+		} else if (name && index >= 0) {
 			if (PTR_ERR(clk) != -EPROBE_DEFER)
 				pr_err("ERROR: could not get clock %s:%s(%i)\n",
 					np->full_name, name ? name : "", index);
@@ -119,7 +122,33 @@
 
 	return clk;
 }
+
+/**
+ * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
+ * @np: pointer to clock consumer node
+ * @name: name of consumer's clock input, or NULL for the first clock reference
+ *
+ * This function parses the clocks and clock-names properties,
+ * and uses them to look up the struct clk from the registered list of clock
+ * providers.
+ */
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+{
+	if (!np)
+		return ERR_PTR(-ENOENT);
+
+	return __of_clk_get_by_name(np, np->full_name, name);
+}
 EXPORT_SYMBOL(of_clk_get_by_name);
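
A minimal consumer sketch of the DT lookup path; the "baud" clock-names entry is an assumption about the consumer's binding, not something defined by this patch:

#include <linux/clk.h>
#include <linux/of.h>

static int my_get_baud_rate(struct device_node *np, unsigned long *rate)
{
	struct clk *clk = of_clk_get_by_name(np, "baud");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	*rate = clk_get_rate(clk);
	return 0;
}
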
+
+#else /* defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) */
+
+static struct clk *__of_clk_get_by_name(struct device_node *np,
+					const char *dev_id,
+					const char *name)
+{
+	return ERR_PTR(-ENOENT);
+}
 #endif
 
 /*
@@ -168,14 +197,28 @@
 struct clk *clk_get_sys(const char *dev_id, const char *con_id)
 {
 	struct clk_lookup *cl;
+	struct clk *clk = NULL;
 
 	mutex_lock(&clocks_mutex);
+
 	cl = clk_find(dev_id, con_id);
-	if (cl && !__clk_get(cl->clk))
+	if (!cl)
+		goto out;
+
+	clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id);
+	if (IS_ERR(clk))
+		goto out;
+
+	if (!__clk_get(clk)) {
+		__clk_free_clk(clk);
 		cl = NULL;
+		goto out;
+	}
+
+out:
 	mutex_unlock(&clocks_mutex);
 
-	return cl ? cl->clk : ERR_PTR(-ENOENT);
+	return cl ? clk : ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL(clk_get_sys);
 
@@ -185,10 +228,8 @@
 	struct clk *clk;
 
 	if (dev) {
-		clk = of_clk_get_by_name(dev->of_node, con_id);
-		if (!IS_ERR(clk))
-			return clk;
-		if (PTR_ERR(clk) == -EPROBE_DEFER)
+		clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
+		if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
 			return clk;
 	}
 
@@ -331,6 +372,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(clk_register_clkdev);
 
 /**
  * clk_register_clkdevs - register a set of clk_lookup for a struct clk
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 007144f..2e4f6d4 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -295,6 +295,8 @@
 }
 
 static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+			      unsigned long min_rate,
+			      unsigned long max_rate,
 			      unsigned long *best_parent_rate,
 			      struct clk_hw **best_parent_p)
 {
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index 48fa53c..de6a873 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -202,6 +202,8 @@
 }
 
 static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long min_rate,
+					unsigned long max_rate,
 					unsigned long *best_parent_rate,
 					struct clk_hw **best_parent_clk)
 {
diff --git a/drivers/clk/pxa/Makefile b/drivers/clk/pxa/Makefile
index 38e9153..38e37bf 100644
--- a/drivers/clk/pxa/Makefile
+++ b/drivers/clk/pxa/Makefile
@@ -1,3 +1,4 @@
 obj-y				+= clk-pxa.o
 obj-$(CONFIG_PXA25x)		+= clk-pxa25x.o
 obj-$(CONFIG_PXA27x)		+= clk-pxa27x.o
+obj-$(CONFIG_PXA3xx)		+= clk-pxa3xx.o
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 4e83475..29cee9e 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -46,7 +46,7 @@
 		fix = &pclk->lp;
 	else
 		fix = &pclk->hp;
-	fix->hw.clk = hw->clk;
+	__clk_hw_set_clk(&fix->hw, hw);
 	return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
 }
 
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
new file mode 100644
index 0000000..39f891b
--- /dev/null
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -0,0 +1,364 @@
+/*
+ * Marvell PXA3xxx family clocks
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * Heavily inspired from former arch/arm/mach-pxa/pxa3xx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * For non-devicetree platforms. Once pxa is fully converted to devicetree, this
+ * should go away.
+ */
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <mach/smemc.h>
+#include <mach/pxa3xx-regs.h>
+
+#include <dt-bindings/clock/pxa-clock.h>
+#include "clk-pxa.h"
+
+#define KHz 1000
+#define MHz (1000 * 1000)
+
+enum {
+	PXA_CORE_60Mhz = 0,
+	PXA_CORE_RUN,
+	PXA_CORE_TURBO,
+};
+
+enum {
+	PXA_BUS_60Mhz = 0,
+	PXA_BUS_HSS,
+};
+
+/* crystal frequency to HSIO bus frequency multiplier (HSS) */
+static unsigned char hss_mult[4] = { 8, 12, 16, 24 };
+
+/* crystal frequency to static memory controller multiplier (SMCFS) */
+static unsigned int smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, };
+static unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
+
+static const char * const get_freq_khz[] = {
+	"core", "ring_osc_60mhz", "run", "cpll", "system_bus"
+};
+
+/*
+ * Get the clock frequency as reflected by ACSR and the turbo flag.
+ * We assume these values have been applied via a frequency change
+ * sequence (fcs). If info is not 0, we also display the current settings.
+ */
+unsigned int pxa3xx_get_clk_frequency_khz(int info)
+{
+	struct clk *clk;
+	unsigned long clks[5];
+	int i;
+
+	for (i = 0; i < 5; i++) {
+		clk = clk_get(NULL, get_freq_khz[i]);
+		if (IS_ERR(clk)) {
+			clks[i] = 0;
+		} else {
+			clks[i] = clk_get_rate(clk);
+			clk_put(clk);
+		}
+	}
+	if (info) {
+		pr_info("RO Mode clock: %ld.%02ldMHz\n",
+			clks[1] / 1000000, (clks[1] % 1000000) / 10000);
+		pr_info("Run Mode clock: %ld.%02ldMHz\n",
+			clks[2] / 1000000, (clks[2] % 1000000) / 10000);
+		pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
+			clks[3] / 1000000, (clks[3] % 1000000) / 10000);
+		pr_info("System bus clock: %ld.%02ldMHz\n",
+			clks[4] / 1000000, (clks[4] % 1000000) / 10000);
+	}
+	return (unsigned int)clks[0];
+}
+
+static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
+					     unsigned long parent_rate)
+{
+	unsigned long ac97_div, rate;
+
+	ac97_div = AC97_DIV;
+
+	/* This may lose precision for some rates but won't for the
+	 * standard 24.576MHz.
+	 */
+	rate = parent_rate / 2;
+	rate /= ((ac97_div >> 12) & 0x7fff);
+	rate *= (ac97_div & 0xfff);
+
+	return rate;
+}
+PARENTS(clk_pxa3xx_ac97) = { "spll_624mhz" };
+RATE_RO_OPS(clk_pxa3xx_ac97, "ac97");
+
+static unsigned long clk_pxa3xx_smemc_get_rate(struct clk_hw *hw,
+					      unsigned long parent_rate)
+{
+	unsigned long acsr = ACSR;
+	unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
+
+	return (parent_rate / 48)  * smcfs_mult[(acsr >> 23) & 0x7] /
+		df_clkdiv[(memclkcfg >> 16) & 0x3];
+}
+PARENTS(clk_pxa3xx_smemc) = { "spll_624mhz" };
+RATE_RO_OPS(clk_pxa3xx_smemc, "smemc");
+
+static bool pxa3xx_is_ring_osc_forced(void)
+{
+	unsigned long acsr = ACSR;
+
+	return acsr & ACCR_D0CS;
+}
+
+PARENTS(pxa3xx_pbus) = { "ring_osc_60mhz", "spll_624mhz" };
+PARENTS(pxa3xx_32Khz_bus) = { "osc_32_768khz", "osc_32_768khz" };
+PARENTS(pxa3xx_13MHz_bus) = { "osc_13mhz", "osc_13mhz" };
+PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
+PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
+PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
+
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp,	\
+		    div_hp, bit, is_lp, flags)				\
+	PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,		\
+		 mult_hp, div_hp, is_lp,  CKEN_AB(bit),			\
+		 (CKEN_ ## bit % 32), flags)
+#define PXA3XX_PBUS_CKEN(dev_id, con_id, bit, mult_lp, div_lp,		\
+			 mult_hp, div_hp, delay)			\
+	PXA3XX_CKEN(dev_id, con_id, pxa3xx_pbus_parents, mult_lp,	\
+		    div_lp, mult_hp, div_hp, bit, pxa3xx_is_ring_osc_forced, 0)
+#define PXA3XX_CKEN_1RATE(dev_id, con_id, bit, parents)			\
+	PXA_CKEN_1RATE(dev_id, con_id, bit, parents,			\
+		       CKEN_AB(bit), (CKEN_ ## bit % 32), 0)
+
+static struct desc_clk_cken pxa3xx_clocks[] __initdata = {
+	PXA3XX_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 4, 1, 42, 1),
+	PXA3XX_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 4, 1, 42, 1),
+	PXA3XX_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 4, 1, 42, 1),
+	PXA3XX_PBUS_CKEN("pxa2xx-i2c.0", NULL, I2C, 2, 5, 1, 19, 0),
+	PXA3XX_PBUS_CKEN("pxa27x-udc", NULL, UDC, 1, 4, 1, 13, 5),
+	PXA3XX_PBUS_CKEN("pxa27x-ohci", NULL, USBH, 1, 4, 1, 13, 0),
+	PXA3XX_PBUS_CKEN("pxa3xx-u2d", NULL, USB2, 1, 4, 1, 13, 0),
+	PXA3XX_PBUS_CKEN("pxa27x-pwm.0", NULL, PWM0, 1, 6, 1, 48, 0),
+	PXA3XX_PBUS_CKEN("pxa27x-pwm.1", NULL, PWM1, 1, 6, 1, 48, 0),
+	PXA3XX_PBUS_CKEN("pxa2xx-mci.0", NULL, MMC1, 1, 4, 1, 24, 0),
+	PXA3XX_PBUS_CKEN("pxa2xx-mci.1", NULL, MMC2, 1, 4, 1, 24, 0),
+	PXA3XX_PBUS_CKEN("pxa2xx-mci.2", NULL, MMC3, 1, 4, 1, 24, 0),
+
+	PXA3XX_CKEN_1RATE("pxa27x-keypad", NULL, KEYPAD,
+			  pxa3xx_32Khz_bus_parents),
+	PXA3XX_CKEN_1RATE("pxa3xx-ssp.0", NULL, SSP1, pxa3xx_13MHz_bus_parents),
+	PXA3XX_CKEN_1RATE("pxa3xx-ssp.1", NULL, SSP2, pxa3xx_13MHz_bus_parents),
+	PXA3XX_CKEN_1RATE("pxa3xx-ssp.2", NULL, SSP3, pxa3xx_13MHz_bus_parents),
+	PXA3XX_CKEN_1RATE("pxa3xx-ssp.3", NULL, SSP4, pxa3xx_13MHz_bus_parents),
+
+	PXA3XX_CKEN(NULL, "AC97CLK", pxa3xx_ac97_bus_parents, 1, 4, 1, 1, AC97,
+		    pxa3xx_is_ring_osc_forced, 0),
+	PXA3XX_CKEN(NULL, "CAMCLK", pxa3xx_sbus_parents, 1, 2, 1, 1, CAMERA,
+		    pxa3xx_is_ring_osc_forced, 0),
+	PXA3XX_CKEN("pxa2xx-fb", NULL, pxa3xx_sbus_parents, 1, 1, 1, 1, LCD,
+		    pxa3xx_is_ring_osc_forced, 0),
+	PXA3XX_CKEN("pxa2xx-pcmcia", NULL, pxa3xx_smemcbus_parents, 1, 4,
+		    1, 1, SMC, pxa3xx_is_ring_osc_forced, CLK_IGNORE_UNUSED),
+};
+
+static struct desc_clk_cken pxa300_310_clocks[] __initdata = {
+
+	PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
+	PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
+	PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static struct desc_clk_cken pxa320_clocks[] __initdata = {
+	PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 6, 0),
+	PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA320_GCU, 1, 1, 1, 1, 0),
+	PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static struct desc_clk_cken pxa93x_clocks[] __initdata = {
+
+	PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
+	PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
+	PXA3XX_CKEN_1RATE("pxa93x-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static unsigned long clk_pxa3xx_system_bus_get_rate(struct clk_hw *hw,
+					    unsigned long parent_rate)
+{
+	unsigned long acsr = ACSR;
+	unsigned int hss = (acsr >> 14) & 0x3;
+
+	if (pxa3xx_is_ring_osc_forced())
+		return parent_rate;
+	return parent_rate / 48 * hss_mult[hss];
+}
+
+static u8 clk_pxa3xx_system_bus_get_parent(struct clk_hw *hw)
+{
+	if (pxa3xx_is_ring_osc_forced())
+		return PXA_BUS_60Mhz;
+	else
+		return PXA_BUS_HSS;
+}
+
+PARENTS(clk_pxa3xx_system_bus) = { "ring_osc_60mhz", "spll_624mhz" };
+MUX_RO_RATE_RO_OPS(clk_pxa3xx_system_bus, "system_bus");
+
+static unsigned long clk_pxa3xx_core_get_rate(struct clk_hw *hw,
+					      unsigned long parent_rate)
+{
+	return parent_rate;
+}
+
+static u8 clk_pxa3xx_core_get_parent(struct clk_hw *hw)
+{
+	unsigned long xclkcfg;
+	unsigned int t;
+
+	if (pxa3xx_is_ring_osc_forced())
+		return PXA_CORE_60Mhz;
+
+	/* Read XCLKCFG register turbo bit */
+	__asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+	t = xclkcfg & 0x1;
+
+	if (t)
+		return PXA_CORE_TURBO;
+	return PXA_CORE_RUN;
+}
+PARENTS(clk_pxa3xx_core) = { "ring_osc_60mhz", "run", "cpll" };
+MUX_RO_RATE_RO_OPS(clk_pxa3xx_core, "core");
+
+static unsigned long clk_pxa3xx_run_get_rate(struct clk_hw *hw,
+					     unsigned long parent_rate)
+{
+	unsigned long acsr = ACSR;
+	unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
+	unsigned int t, xclkcfg;
+
+	/* Read XCLKCFG register turbo bit */
+	__asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+	t = xclkcfg & 0x1;
+
+	return t ? (parent_rate / xn) * 2 : parent_rate;
+}
+PARENTS(clk_pxa3xx_run) = { "cpll" };
+RATE_RO_OPS(clk_pxa3xx_run, "run");
+
+static unsigned long clk_pxa3xx_cpll_get_rate(struct clk_hw *hw,
+	unsigned long parent_rate)
+{
+	unsigned long acsr = ACSR;
+	unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
+	unsigned int xl = acsr & ACCR_XL_MASK;
+	unsigned int t, xclkcfg;
+
+	/* Read XCLKCFG register turbo bit */
+	__asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+	t = xclkcfg & 0x1;
+
+	pr_debug("%s: parent_rate=%lu, xl=%u, xn=%u\n",
+		 __func__, parent_rate, xl, xn);
+	return t ? parent_rate * xl * xn : parent_rate * xl;
+}
+PARENTS(clk_pxa3xx_cpll) = { "osc_13mhz" };
+RATE_RO_OPS(clk_pxa3xx_cpll, "cpll");
+
+static void __init pxa3xx_register_core(void)
+{
+	clk_register_clk_pxa3xx_cpll();
+	clk_register_clk_pxa3xx_run();
+
+	clkdev_pxa_register(CLK_CORE, "core", NULL,
+			    clk_register_clk_pxa3xx_core());
+}
+
+static void __init pxa3xx_register_plls(void)
+{
+	clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
+				CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+				13 * MHz);
+	clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+				CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+				32768);
+	clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
+				CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+				120 * MHz);
+	clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+	clk_register_fixed_factor(NULL, "spll_624mhz", "osc_13mhz", 0, 48, 1);
+	clk_register_fixed_factor(NULL, "ring_osc_60mhz", "ring_osc_120mhz",
+				  0, 1, 2);
+}
+
+#define DUMMY_CLK(_con_id, _dev_id, _parent) \
+	{ .con_id = _con_id, .dev_id = _dev_id, .parent = _parent }
+struct dummy_clk {
+	const char *con_id;
+	const char *dev_id;
+	const char *parent;
+};
+static struct dummy_clk dummy_clks[] __initdata = {
+	DUMMY_CLK(NULL, "pxa93x-gpio", "osc_13mhz"),
+	DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
+	DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
+	DUMMY_CLK(NULL, "pxa3xx-pwri2c.1", "osc_13mhz"),
+};
+
+static void __init pxa3xx_dummy_clocks_init(void)
+{
+	struct clk *clk;
+	struct dummy_clk *d;
+	const char *name;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) {
+		d = &dummy_clks[i];
+		name = d->dev_id ? d->dev_id : d->con_id;
+		clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1);
+		clk_register_clkdev(clk, d->con_id, d->dev_id);
+	}
+}
+
+static void __init pxa3xx_base_clocks_init(void)
+{
+	pxa3xx_register_plls();
+	pxa3xx_register_core();
+	clk_register_clk_pxa3xx_system_bus();
+	clk_register_clk_pxa3xx_ac97();
+	clk_register_clk_pxa3xx_smemc();
+	clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0,
+			  (void __iomem *)&OSCC, 11, 0, NULL);
+}
+
+int __init pxa3xx_clocks_init(void)
+{
+	int ret;
+
+	pxa3xx_base_clocks_init();
+	pxa3xx_dummy_clocks_init();
+	ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks));
+	if (ret)
+		return ret;
+	if (cpu_is_pxa320())
+		return clk_pxa_cken_init(pxa320_clocks,
+					 ARRAY_SIZE(pxa320_clocks));
+	if (cpu_is_pxa300() || cpu_is_pxa310())
+		return clk_pxa_cken_init(pxa300_310_clocks,
+					 ARRAY_SIZE(pxa300_310_clocks));
+	return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks));
+}
+
+static void __init pxa3xx_dt_clocks_init(struct device_node *np)
+{
+	pxa3xx_clocks_init();
+	clk_pxa_dt_common_init(np);
+}
+CLK_OF_DECLARE(pxa_clks, "marvell,pxa300-clocks", pxa3xx_dt_clocks_init);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1107351..0d7ab52 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -29,6 +29,15 @@
 	  Say Y if you want to use peripheral devices such as UART, SPI,
 	  i2c, USB, SD/eMMC, etc.
 
+config IPQ_LCC_806X
+	tristate "IPQ806x LPASS Clock Controller"
+	select IPQ_GCC_806X
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the LPASS clock controller on ipq806x devices.
+	  Say Y if you want to use audio devices such as I2S, PCM,
+	  S/PDIF, etc.
+
 config MSM_GCC_8660
 	tristate "MSM8660 Global Clock Controller"
 	depends on COMMON_CLK_QCOM
@@ -45,6 +54,15 @@
 	  Say Y if you want to use peripheral devices such as UART, SPI,
 	  i2c, USB, SD/eMMC, SATA, PCIe, etc.
 
+config MSM_LCC_8960
+	tristate "APQ8064/MSM8960 LPASS Clock Controller"
+	select MSM_GCC_8960
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the LPASS clock controller on apq8064/msm8960 devices.
+	  Say Y if you want to use audio devices such as I2S, PCM,
+	  SLIMbus, etc.
+
 config MSM_MMCC_8960
 	tristate "MSM8960 Multimedia Clock Controller"
 	select MSM_GCC_8960
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 783cfb2..6178264 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -6,13 +6,17 @@
 clk-qcom-y += clk-rcg.o
 clk-qcom-y += clk-rcg2.o
 clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-regmap-divider.o
+clk-qcom-y += clk-regmap-mux.o
 clk-qcom-y += reset.o
 
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index 60873a7..b4325f6 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -141,6 +141,7 @@
 
 static long
 clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
+		       unsigned long min_rate, unsigned long max_rate,
 		       unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_pll *pll = to_clk_pll(hw);
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 0b93972..0039bd7 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -368,6 +368,7 @@
 
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
 		const struct freq_tbl *f, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p_hw)
 {
 	unsigned long clk_flags;
@@ -397,22 +398,27 @@
 }
 
 static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg *rcg = to_clk_rcg(hw);
 
-	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
+			max_rate, p_rate, p);
 }
 
 static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
 
-	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
+			max_rate, p_rate, p);
 }
 
 static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p_hw)
 {
 	struct clk_rcg *rcg = to_clk_rcg(hw);
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 08b8b37..742acfa 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -208,6 +208,7 @@
 }
 
 static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long min_rate, unsigned long max_rate,
 		unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -361,6 +362,8 @@
 }
 
 static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
+				 unsigned long min_rate,
+				 unsigned long max_rate,
 				 unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -412,6 +415,7 @@
 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
 
 static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
+			 unsigned long min_rate, unsigned long max_rate,
 			 unsigned long *p_rate, struct clk_hw **p_hw)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -476,6 +480,8 @@
 };
 
 static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
+				 unsigned long min_rate,
+				 unsigned long max_rate,
 				 unsigned long *p_rate, struct clk_hw **p)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
new file mode 100644
index 0000000..5348491
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap-divider.h"
+
+static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
+{
+	return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
+}
+
+static long div_round_rate(struct clk_hw *hw, unsigned long rate,
+			   unsigned long *prate)
+{
+	struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+
+	return divider_round_rate(hw, rate, prate, NULL, divider->width,
+				  CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int div_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
+{
+	struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+	struct clk_regmap *clkr = &divider->clkr;
+	u32 div;
+
+	div = divider_get_val(rate, parent_rate, NULL, divider->width,
+			      CLK_DIVIDER_ROUND_CLOSEST);
+
+	return regmap_update_bits(clkr->regmap, divider->reg,
+				  (BIT(divider->width) - 1) << divider->shift,
+				  div << divider->shift);
+}
+
+static unsigned long div_recalc_rate(struct clk_hw *hw,
+				     unsigned long parent_rate)
+{
+	struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+	struct clk_regmap *clkr = &divider->clkr;
+	u32 div;
+
+	regmap_read(clkr->regmap, divider->reg, &div);
+	div >>= divider->shift;
+	div &= BIT(divider->width) - 1;
+
+	return divider_recalc_rate(hw, parent_rate, div, NULL,
+				   CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+const struct clk_ops clk_regmap_div_ops = {
+	.round_rate = div_round_rate,
+	.set_rate = div_set_rate,
+	.recalc_rate = div_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h
new file mode 100644
index 0000000..fc4492e
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-divider.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_DIVIDER_H__
+#define __QCOM_CLK_REGMAP_DIVIDER_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct clk_regmap_div {
+	u32			reg;
+	u32			shift;
+	u32			width;
+	struct clk_regmap	clkr;
+};
+
+extern const struct clk_ops clk_regmap_div_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c
new file mode 100644
index 0000000..cae3071
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap-mux.h"
+
+static inline struct clk_regmap_mux *to_clk_regmap_mux(struct clk_hw *hw)
+{
+	return container_of(to_clk_regmap(hw), struct clk_regmap_mux, clkr);
+}
+
+static u8 mux_get_parent(struct clk_hw *hw)
+{
+	struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
+	struct clk_regmap *clkr = to_clk_regmap(hw);
+	unsigned int mask = GENMASK(mux->width - 1, 0);
+	unsigned int val;
+
+	regmap_read(clkr->regmap, mux->reg, &val);
+
+	val >>= mux->shift;
+	val &= mask;
+
+	return val;
+}
+
+static int mux_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
+	struct clk_regmap *clkr = to_clk_regmap(hw);
+	unsigned int mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
+	unsigned int val;
+
+	val = index;
+	val <<= mux->shift;
+
+	return regmap_update_bits(clkr->regmap, mux->reg, mask, val);
+}
+
+const struct clk_ops clk_regmap_mux_closest_ops = {
+	.get_parent = mux_get_parent,
+	.set_parent = mux_set_parent,
+	.determine_rate = __clk_mux_determine_rate_closest,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_closest_ops);
diff --git a/drivers/clk/qcom/clk-regmap-mux.h b/drivers/clk/qcom/clk-regmap-mux.h
new file mode 100644
index 0000000..5cec761
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_MUX_H__
+#define __QCOM_CLK_REGMAP_MUX_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct clk_regmap_mux {
+	u32			reg;
+	u32			shift;
+	u32			width;
+	struct clk_regmap	clkr;
+};
+
+extern const struct clk_ops clk_regmap_mux_closest_ops;
+
+#endif
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index afed5eb..cbdc31d 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -75,6 +75,17 @@
 	},
 };
 
+static struct clk_regmap pll4_vote = {
+	.enable_reg = 0x34c0,
+	.enable_mask = BIT(4),
+	.hw.init = &(struct clk_init_data){
+		.name = "pll4_vote",
+		.parent_names = (const char *[]){ "pll4" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
 static struct clk_pll pll8 = {
 	.l_reg = 0x3144,
 	.m_reg = 0x3148,
@@ -2163,6 +2174,7 @@
 	[PLL0] = &pll0.clkr,
 	[PLL0_VOTE] = &pll0_vote,
 	[PLL3] = &pll3.clkr,
+	[PLL4_VOTE] = &pll4_vote,
 	[PLL8] = &pll8.clkr,
 	[PLL8_VOTE] = &pll8_vote,
 	[PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index b0b562b..e60feff 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -48,6 +48,17 @@
 	},
 };
 
+static struct clk_regmap pll4_vote = {
+	.enable_reg = 0x34c0,
+	.enable_mask = BIT(4),
+	.hw.init = &(struct clk_init_data){
+		.name = "pll4_vote",
+		.parent_names = (const char *[]){ "pll4" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
 static struct clk_pll pll8 = {
 	.l_reg = 0x3144,
 	.m_reg = 0x3148,
@@ -3023,6 +3034,7 @@
 
 static struct clk_regmap *gcc_msm8960_clks[] = {
 	[PLL3] = &pll3.clkr,
+	[PLL4_VOTE] = &pll4_vote,
 	[PLL8] = &pll8.clkr,
 	[PLL8_VOTE] = &pll8_vote,
 	[PLL14] = &pll14.clkr,
@@ -3247,6 +3259,7 @@
 
 static struct clk_regmap *gcc_apq8064_clks[] = {
 	[PLL3] = &pll3.clkr,
+	[PLL4_VOTE] = &pll4_vote,
 	[PLL8] = &pll8.clkr,
 	[PLL8_VOTE] = &pll8_vote,
 	[PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
new file mode 100644
index 0000000..c9ff27b
--- /dev/null
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lcc-ipq806x.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+
+static struct clk_pll pll4 = {
+	.l_reg = 0x4,
+	.m_reg = 0x8,
+	.n_reg = 0xc,
+	.config_reg = 0x14,
+	.mode_reg = 0x0,
+	.status_reg = 0x18,
+	.status_bit = 16,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pll4",
+		.parent_names = (const char *[]){ "pxo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+static const struct pll_config pll4_config = {
+	.l = 0xf,
+	.m = 0x91,
+	.n = 0xc7,
+	.vco_val = 0x0,
+	.vco_mask = BIT(17) | BIT(16),
+	.pre_div_val = 0x0,
+	.pre_div_mask = BIT(19),
+	.post_div_val = 0x0,
+	.post_div_mask = BIT(21) | BIT(20),
+	.mn_ena_mask = BIT(22),
+	.main_output_mask = BIT(23),
+};
+
+#define P_PXO	0
+#define P_PLL4	1
+
+static const u8 lcc_pxo_pll4_map[] = {
+	[P_PXO]		= 0,
+	[P_PLL4]	= 2,
+};
+
+static const char *lcc_pxo_pll4[] = {
+	"pxo",
+	"pll4_vote",
+};
+
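+/* Frequency table entries are { rate (Hz), source, pre-divider, M, N } */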
+static struct freq_tbl clk_tbl_aif_mi2s[] = {
+	{  1024000, P_PLL4, 4,  1,  96 },
+	{  1411200, P_PLL4, 4,  2, 139 },
+	{  1536000, P_PLL4, 4,  1,  64 },
+	{  2048000, P_PLL4, 4,  1,  48 },
+	{  2116800, P_PLL4, 4,  2,  93 },
+	{  2304000, P_PLL4, 4,  2,  85 },
+	{  2822400, P_PLL4, 4,  6, 209 },
+	{  3072000, P_PLL4, 4,  1,  32 },
+	{  3175200, P_PLL4, 4,  1,  31 },
+	{  4096000, P_PLL4, 4,  1,  24 },
+	{  4233600, P_PLL4, 4,  9, 209 },
+	{  4608000, P_PLL4, 4,  3,  64 },
+	{  5644800, P_PLL4, 4, 12, 209 },
+	{  6144000, P_PLL4, 4,  1,  16 },
+	{  6350400, P_PLL4, 4,  2,  31 },
+	{  8192000, P_PLL4, 4,  1,  12 },
+	{  8467200, P_PLL4, 4, 18, 209 },
+	{  9216000, P_PLL4, 4,  3,  32 },
+	{ 11289600, P_PLL4, 4, 24, 209 },
+	{ 12288000, P_PLL4, 4,  1,   8 },
+	{ 12700800, P_PLL4, 4, 27, 209 },
+	{ 13824000, P_PLL4, 4,  9,  64 },
+	{ 16384000, P_PLL4, 4,  1,   6 },
+	{ 16934400, P_PLL4, 4, 41, 238 },
+	{ 18432000, P_PLL4, 4,  3,  16 },
+	{ 22579200, P_PLL4, 2, 24, 209 },
+	{ 24576000, P_PLL4, 4,  1,   4 },
+	{ 27648000, P_PLL4, 4,  9,  32 },
+	{ 33868800, P_PLL4, 4, 41, 119 },
+	{ 36864000, P_PLL4, 4,  3,   8 },
+	{ 45158400, P_PLL4, 1, 24, 209 },
+	{ 49152000, P_PLL4, 4,  1,   2 },
+	{ 50803200, P_PLL4, 1, 27, 209 },
+	{ }
+};
+
+static struct clk_rcg mi2s_osr_src = {
+	.ns_reg = 0x48,
+	.md_reg = 0x4c,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 24,
+		.m_val_shift = 8,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = lcc_pxo_pll4_map,
+	},
+	.freq_tbl = clk_tbl_aif_mi2s,
+	.clkr = {
+		.enable_reg = 0x48,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "mi2s_osr_src",
+			.parent_names = lcc_pxo_pll4,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static const char *lcc_mi2s_parents[] = {
+	"mi2s_osr_src",
+};
+
+static struct clk_branch mi2s_osr_clk = {
+	.halt_reg = 0x50,
+	.halt_bit = 1,
+	.halt_check = BRANCH_HALT_ENABLE,
+	.clkr = {
+		.enable_reg = 0x48,
+		.enable_mask = BIT(17),
+		.hw.init = &(struct clk_init_data){
+			.name = "mi2s_osr_clk",
+			.parent_names = lcc_mi2s_parents,
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_div mi2s_div_clk = {
+	.reg = 0x48,
+	.shift = 10,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "mi2s_div_clk",
+			.parent_names = lcc_mi2s_parents,
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_branch mi2s_bit_div_clk = {
+	.halt_reg = 0x50,
+	.halt_bit = 0,
+	.halt_check = BRANCH_HALT_ENABLE,
+	.clkr = {
+		.enable_reg = 0x48,
+		.enable_mask = BIT(15),
+		.hw.init = &(struct clk_init_data){
+			.name = "mi2s_bit_div_clk",
+			.parent_names = (const char *[]){ "mi2s_div_clk" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_mux mi2s_bit_clk = {
+	.reg = 0x48,
+	.shift = 14,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "mi2s_bit_clk",
+			.parent_names = (const char *[]){
+				"mi2s_bit_div_clk",
+				"mi2s_codec_clk",
+			},
+			.num_parents = 2,
+			.ops = &clk_regmap_mux_closest_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct freq_tbl clk_tbl_pcm[] = {
+	{   64000, P_PLL4, 4, 1, 1536 },
+	{  128000, P_PLL4, 4, 1,  768 },
+	{  256000, P_PLL4, 4, 1,  384 },
+	{  512000, P_PLL4, 4, 1,  192 },
+	{ 1024000, P_PLL4, 4, 1,   96 },
+	{ 2048000, P_PLL4, 4, 1,   48 },
+	{ },
+};
+
+static struct clk_rcg pcm_src = {
+	.ns_reg = 0x54,
+	.md_reg = 0x58,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 16,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = lcc_pxo_pll4_map,
+	},
+	.freq_tbl = clk_tbl_pcm,
+	.clkr = {
+		.enable_reg = 0x54,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcm_src",
+			.parent_names = lcc_pxo_pll4,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch pcm_clk_out = {
+	.halt_reg = 0x5c,
+	.halt_bit = 0,
+	.halt_check = BRANCH_HALT_ENABLE,
+	.clkr = {
+		.enable_reg = 0x54,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcm_clk_out",
+			.parent_names = (const char *[]){ "pcm_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_mux pcm_clk = {
+	.reg = 0x54,
+	.shift = 10,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "pcm_clk",
+			.parent_names = (const char *[]){
+				"pcm_clk_out",
+				"pcm_codec_clk",
+			},
+			.num_parents = 2,
+			.ops = &clk_regmap_mux_closest_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct freq_tbl clk_tbl_aif_osr[] = {
+	{  22050, P_PLL4, 1, 147, 20480 },
+	{  32000, P_PLL4, 1,   1,    96 },
+	{  44100, P_PLL4, 1, 147, 10240 },
+	{  48000, P_PLL4, 1,   1,    64 },
+	{  88200, P_PLL4, 1, 147,  5120 },
+	{  96000, P_PLL4, 1,   1,    32 },
+	{ 176400, P_PLL4, 1, 147,  2560 },
+	{ 192000, P_PLL4, 1,   1,    16 },
+	{ },
+};
+
+static struct clk_rcg spdif_src = {
+	.ns_reg = 0xcc,
+	.md_reg = 0xd0,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = lcc_pxo_pll4_map,
+	},
+	.freq_tbl = clk_tbl_aif_osr,
+	.clkr = {
+		.enable_reg = 0xcc,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "spdif_src",
+			.parent_names = lcc_pxo_pll4,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static const char *lcc_spdif_parents[] = {
+	"spdif_src",
+};
+
+static struct clk_branch spdif_clk = {
+	.halt_reg = 0xd4,
+	.halt_bit = 1,
+	.halt_check = BRANCH_HALT_ENABLE,
+	.clkr = {
+		.enable_reg = 0xcc,
+		.enable_mask = BIT(12),
+		.hw.init = &(struct clk_init_data){
+			.name = "spdif_clk",
+			.parent_names = lcc_spdif_parents,
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct freq_tbl clk_tbl_ahbix[] = {
+	{ 131072, P_PLL4, 1, 1, 3 },
+	{ },
+};
+
+static struct clk_rcg ahbix_clk = {
+	.ns_reg = 0x38,
+	.md_reg = 0x3c,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 24,
+		.m_val_shift = 8,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = lcc_pxo_pll4_map,
+	},
+	.freq_tbl = clk_tbl_ahbix,
+	.clkr = {
+		.enable_reg = 0x38,
+		.enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
+		.hw.init = &(struct clk_init_data){
+			.name = "ahbix",
+			.parent_names = lcc_pxo_pll4,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_regmap *lcc_ipq806x_clks[] = {
+	[PLL4] = &pll4.clkr,
+	[MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
+	[MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
+	[MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
+	[MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
+	[MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
+	[PCM_SRC] = &pcm_src.clkr,
+	[PCM_CLK_OUT] = &pcm_clk_out.clkr,
+	[PCM_CLK] = &pcm_clk.clkr,
+	[SPDIF_SRC] = &spdif_src.clkr,
+	[SPDIF_CLK] = &spdif_clk.clkr,
+	[AHBIX_CLK] = &ahbix_clk.clkr,
+};
+
+static const struct regmap_config lcc_ipq806x_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0xfc,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc lcc_ipq806x_desc = {
+	.config = &lcc_ipq806x_regmap_config,
+	.clks = lcc_ipq806x_clks,
+	.num_clks = ARRAY_SIZE(lcc_ipq806x_clks),
+};
+
+static const struct of_device_id lcc_ipq806x_match_table[] = {
+	{ .compatible = "qcom,lcc-ipq8064" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lcc_ipq806x_match_table);
+
+static int lcc_ipq806x_probe(struct platform_device *pdev)
+{
+	u32 val;
+	struct regmap *regmap;
+
+	regmap = qcom_cc_map(pdev, &lcc_ipq806x_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	/* Configure the rate of PLL4 if the bootloader hasn't already */
+	regmap_read(regmap, 0x0, &val);
+	if (!val)
+		clk_pll_configure_sr(&pll4, regmap, &pll4_config, true);
+	/* Enable PLL4 source on the LPASS Primary PLL Mux */
+	regmap_write(regmap, 0xc4, 0x1);
+
+	return qcom_cc_really_probe(pdev, &lcc_ipq806x_desc, regmap);
+}
+
+static int lcc_ipq806x_remove(struct platform_device *pdev)
+{
+	qcom_cc_remove(pdev);
+	return 0;
+}
+
+static struct platform_driver lcc_ipq806x_driver = {
+	.probe		= lcc_ipq806x_probe,
+	.remove		= lcc_ipq806x_remove,
+	.driver		= {
+		.name	= "lcc-ipq806x",
+		.of_match_table = lcc_ipq806x_match_table,
+	},
+};
+module_platform_driver(lcc_ipq806x_driver);
+
+MODULE_DESCRIPTION("QCOM LCC IPQ806x Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lcc-ipq806x");
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
new file mode 100644
index 0000000..e2c8632
--- /dev/null
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lcc-msm8960.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+
+static struct clk_pll pll4 = {
+	.l_reg = 0x4,
+	.m_reg = 0x8,
+	.n_reg = 0xc,
+	.config_reg = 0x14,
+	.mode_reg = 0x0,
+	.status_reg = 0x18,
+	.status_bit = 16,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "pll4",
+		.parent_names = (const char *[]){ "pxo" },
+		.num_parents = 1,
+		.ops = &clk_pll_ops,
+	},
+};
+
+#define P_PXO	0
+#define P_PLL4	1
+
+static const u8 lcc_pxo_pll4_map[] = {
+	[P_PXO]		= 0,
+	[P_PLL4]	= 2,
+};
+
+static const char *lcc_pxo_pll4[] = {
+	"pxo",
+	"pll4_vote",
+};
+
+static struct freq_tbl clk_tbl_aif_osr_492[] = {
+	{   512000, P_PLL4, 4, 1, 240 },
+	{   768000, P_PLL4, 4, 1, 160 },
+	{  1024000, P_PLL4, 4, 1, 120 },
+	{  1536000, P_PLL4, 4, 1,  80 },
+	{  2048000, P_PLL4, 4, 1,  60 },
+	{  3072000, P_PLL4, 4, 1,  40 },
+	{  4096000, P_PLL4, 4, 1,  30 },
+	{  6144000, P_PLL4, 4, 1,  20 },
+	{  8192000, P_PLL4, 4, 1,  15 },
+	{ 12288000, P_PLL4, 4, 1,  10 },
+	{ 24576000, P_PLL4, 4, 1,   5 },
+	{ 27000000, P_PXO,  1, 0,   0 },
+	{ }
+};
+
+static struct freq_tbl clk_tbl_aif_osr_393[] = {
+	{   512000, P_PLL4, 4, 1, 192 },
+	{   768000, P_PLL4, 4, 1, 128 },
+	{  1024000, P_PLL4, 4, 1,  96 },
+	{  1536000, P_PLL4, 4, 1,  64 },
+	{  2048000, P_PLL4, 4, 1,  48 },
+	{  3072000, P_PLL4, 4, 1,  32 },
+	{  4096000, P_PLL4, 4, 1,  24 },
+	{  6144000, P_PLL4, 4, 1,  16 },
+	{  8192000, P_PLL4, 4, 1,  12 },
+	{ 12288000, P_PLL4, 4, 1,   8 },
+	{ 24576000, P_PLL4, 4, 1,   4 },
+	{ 27000000, P_PXO,  1, 0,   0 },
+	{ }
+};
+
+static struct clk_rcg mi2s_osr_src = {
+	.ns_reg = 0x48,
+	.md_reg = 0x4c,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 24,
+		.m_val_shift = 8,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = lcc_pxo_pll4_map,
+	},
+	.freq_tbl = clk_tbl_aif_osr_393,
+	.clkr = {
+		.enable_reg = 0x48,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "mi2s_osr_src",
+			.parent_names = lcc_pxo_pll4,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static const char *lcc_mi2s_parents[] = {
+	"mi2s_osr_src",
+};
+
+static struct clk_branch mi2s_osr_clk = {
+	.halt_reg = 0x50,
+	.halt_bit = 1,
+	.halt_check = BRANCH_HALT_ENABLE,
+	.clkr = {
+		.enable_reg = 0x48,
+		.enable_mask = BIT(17),
+		.hw.init = &(struct clk_init_data){
+			.name = "mi2s_osr_clk",
+			.parent_names = lcc_mi2s_parents,
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_div mi2s_div_clk = {
+	.reg = 0x48,
+	.shift = 10,
+	.width = 4,
+	.clkr = {
+		.enable_reg = 0x48,
+		.enable_mask = BIT(15),
+		.hw.init = &(struct clk_init_data){
+			.name = "mi2s_div_clk",
+			.parent_names = lcc_mi2s_parents,
+			.num_parents = 1,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_branch mi2s_bit_div_clk = {
+	.halt_reg = 0x50,
+	.halt_bit = 0,
+	.halt_check = BRANCH_HALT_ENABLE,
+	.clkr = {
+		.enable_reg = 0x48,
+		.enable_mask = BIT(15),
+		.hw.init = &(struct clk_init_data){
+			.name = "mi2s_bit_div_clk",
+			.parent_names = (const char *[]){ "mi2s_div_clk" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_mux mi2s_bit_clk = {
+	.reg = 0x48,
+	.shift = 14,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "mi2s_bit_clk",
+			.parent_names = (const char *[]){
+				"mi2s_bit_div_clk",
+				"mi2s_codec_clk",
+			},
+			.num_parents = 2,
+			.ops = &clk_regmap_mux_closest_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr)			\
+static struct clk_rcg prefix##_osr_src = {			\
+	.ns_reg = _ns,						\
+	.md_reg = _md,						\
+	.mn = {							\
+		.mnctr_en_bit = 8,				\
+		.mnctr_reset_bit = 7,				\
+		.mnctr_mode_shift = 5,				\
+		.n_val_shift = 24,				\
+		.m_val_shift = 8,				\
+		.width = 8,					\
+	},							\
+	.p = {							\
+		.pre_div_shift = 3,				\
+		.pre_div_width = 2,				\
+	},							\
+	.s = {							\
+		.src_sel_shift = 0,				\
+		.parent_map = lcc_pxo_pll4_map,			\
+	},							\
+	.freq_tbl = clk_tbl_aif_osr_393,			\
+	.clkr = {						\
+		.enable_reg = _ns,				\
+		.enable_mask = BIT(9),				\
+		.hw.init = &(struct clk_init_data){		\
+			.name = #prefix "_osr_src",		\
+			.parent_names = lcc_pxo_pll4,		\
+			.num_parents = 2,			\
+			.ops = &clk_rcg_ops,			\
+			.flags = CLK_SET_RATE_GATE,		\
+		},						\
+	},							\
+};								\
+								\
+static const char *lcc_##prefix##_parents[] = {			\
+	#prefix "_osr_src",					\
+};								\
+								\
+static struct clk_branch prefix##_osr_clk = {			\
+	.halt_reg = hr,						\
+	.halt_bit = 1,						\
+	.halt_check = BRANCH_HALT_ENABLE,			\
+	.clkr = {						\
+		.enable_reg = _ns,				\
+		.enable_mask = BIT(21),				\
+		.hw.init = &(struct clk_init_data){		\
+			.name = #prefix "_osr_clk",		\
+			.parent_names = lcc_##prefix##_parents,	\
+			.num_parents = 1,			\
+			.ops = &clk_branch_ops,			\
+			.flags = CLK_SET_RATE_PARENT,		\
+		},						\
+	},							\
+};								\
+								\
+static struct clk_regmap_div prefix##_div_clk = {		\
+	.reg = _ns,						\
+	.shift = 10,						\
+	.width = 8,						\
+	.clkr = {						\
+		.hw.init = &(struct clk_init_data){		\
+			.name = #prefix "_div_clk",		\
+			.parent_names = lcc_##prefix##_parents,	\
+			.num_parents = 1,			\
+			.ops = &clk_regmap_div_ops,		\
+		},						\
+	},							\
+};								\
+								\
+static struct clk_branch prefix##_bit_div_clk = {		\
+	.halt_reg = hr,						\
+	.halt_bit = 0,						\
+	.halt_check = BRANCH_HALT_ENABLE,			\
+	.clkr = {						\
+		.enable_reg = _ns,				\
+		.enable_mask = BIT(19),				\
+		.hw.init = &(struct clk_init_data){		\
+			.name = #prefix "_bit_div_clk",		\
+			.parent_names = (const char *[]){	\
+				#prefix "_div_clk"		\
+			},					\
+			.num_parents = 1,			\
+			.ops = &clk_branch_ops,			\
+			.flags = CLK_SET_RATE_PARENT,		\
+		},						\
+	},							\
+};								\
+								\
+static struct clk_regmap_mux prefix##_bit_clk = {		\
+	.reg = _ns,						\
+	.shift = 18,						\
+	.width = 1,						\
+	.clkr = {						\
+		.hw.init = &(struct clk_init_data){		\
+			.name = #prefix "_bit_clk",		\
+			.parent_names = (const char *[]){	\
+				#prefix "_bit_div_clk",		\
+				#prefix "_codec_clk",		\
+			},					\
+			.num_parents = 2,			\
+			.ops = &clk_regmap_mux_closest_ops,	\
+			.flags = CLK_SET_RATE_PARENT,		\
+		},						\
+	},							\
+}
+
+CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68);
+CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
+CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74);
+CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c);
+
+static struct freq_tbl clk_tbl_pcm_492[] = {
+	{   256000, P_PLL4, 4, 1, 480 },
+	{   512000, P_PLL4, 4, 1, 240 },
+	{   768000, P_PLL4, 4, 1, 160 },
+	{  1024000, P_PLL4, 4, 1, 120 },
+	{  1536000, P_PLL4, 4, 1,  80 },
+	{  2048000, P_PLL4, 4, 1,  60 },
+	{  3072000, P_PLL4, 4, 1,  40 },
+	{  4096000, P_PLL4, 4, 1,  30 },
+	{  6144000, P_PLL4, 4, 1,  20 },
+	{  8192000, P_PLL4, 4, 1,  15 },
+	{ 12288000, P_PLL4, 4, 1,  10 },
+	{ 24576000, P_PLL4, 4, 1,   5 },
+	{ 27000000, P_PXO,  1, 0,   0 },
+	{ }
+};
+
+static struct freq_tbl clk_tbl_pcm_393[] = {
+	{   256000, P_PLL4, 4, 1, 384 },
+	{   512000, P_PLL4, 4, 1, 192 },
+	{   768000, P_PLL4, 4, 1, 128 },
+	{  1024000, P_PLL4, 4, 1,  96 },
+	{  1536000, P_PLL4, 4, 1,  64 },
+	{  2048000, P_PLL4, 4, 1,  48 },
+	{  3072000, P_PLL4, 4, 1,  32 },
+	{  4096000, P_PLL4, 4, 1,  24 },
+	{  6144000, P_PLL4, 4, 1,  16 },
+	{  8192000, P_PLL4, 4, 1,  12 },
+	{ 12288000, P_PLL4, 4, 1,   8 },
+	{ 24576000, P_PLL4, 4, 1,   4 },
+	{ 27000000, P_PXO,  1, 0,   0 },
+	{ }
+};
+
+static struct clk_rcg pcm_src = {
+	.ns_reg = 0x54,
+	.md_reg = 0x58,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 16,
+		.m_val_shift = 16,
+		.width = 16,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = lcc_pxo_pll4_map,
+	},
+	.freq_tbl = clk_tbl_pcm_393,
+	.clkr = {
+		.enable_reg = 0x54,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcm_src",
+			.parent_names = lcc_pxo_pll4,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static struct clk_branch pcm_clk_out = {
+	.halt_reg = 0x5c,
+	.halt_bit = 0,
+	.halt_check = BRANCH_HALT_ENABLE,
+	.clkr = {
+		.enable_reg = 0x54,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "pcm_clk_out",
+			.parent_names = (const char *[]){ "pcm_src" },
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap_mux pcm_clk = {
+	.reg = 0x54,
+	.shift = 10,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "pcm_clk",
+			.parent_names = (const char *[]){
+				"pcm_clk_out",
+				"pcm_codec_clk",
+			},
+			.num_parents = 2,
+			.ops = &clk_regmap_mux_closest_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_rcg slimbus_src = {
+	.ns_reg = 0xcc,
+	.md_reg = 0xd0,
+	.mn = {
+		.mnctr_en_bit = 8,
+		.mnctr_reset_bit = 7,
+		.mnctr_mode_shift = 5,
+		.n_val_shift = 24,
+		.m_val_shift = 8,
+		.width = 8,
+	},
+	.p = {
+		.pre_div_shift = 3,
+		.pre_div_width = 2,
+	},
+	.s = {
+		.src_sel_shift = 0,
+		.parent_map = lcc_pxo_pll4_map,
+	},
+	.freq_tbl = clk_tbl_aif_osr_393,
+	.clkr = {
+		.enable_reg = 0xcc,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "slimbus_src",
+			.parent_names = lcc_pxo_pll4,
+			.num_parents = 2,
+			.ops = &clk_rcg_ops,
+			.flags = CLK_SET_RATE_GATE,
+		},
+	},
+};
+
+static const char *lcc_slimbus_parents[] = {
+	"slimbus_src",
+};
+
+static struct clk_branch audio_slimbus_clk = {
+	.halt_reg = 0xd4,
+	.halt_bit = 0,
+	.halt_check = BRANCH_HALT_ENABLE,
+	.clkr = {
+		.enable_reg = 0xcc,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "audio_slimbus_clk",
+			.parent_names = lcc_slimbus_parents,
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_branch sps_slimbus_clk = {
+	.halt_reg = 0xd4,
+	.halt_bit = 1,
+	.halt_check = BRANCH_HALT_ENABLE,
+	.clkr = {
+		.enable_reg = 0xcc,
+		.enable_mask = BIT(12),
+		.hw.init = &(struct clk_init_data){
+			.name = "sps_slimbus_clk",
+			.parent_names = lcc_slimbus_parents,
+			.num_parents = 1,
+			.ops = &clk_branch_ops,
+			.flags = CLK_SET_RATE_PARENT,
+		},
+	},
+};
+
+static struct clk_regmap *lcc_msm8960_clks[] = {
+	[PLL4] = &pll4.clkr,
+	[MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
+	[MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
+	[MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
+	[MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
+	[MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
+	[PCM_SRC] = &pcm_src.clkr,
+	[PCM_CLK_OUT] = &pcm_clk_out.clkr,
+	[PCM_CLK] = &pcm_clk.clkr,
+	[SLIMBUS_SRC] = &slimbus_src.clkr,
+	[AUDIO_SLIMBUS_CLK] = &audio_slimbus_clk.clkr,
+	[SPS_SLIMBUS_CLK] = &sps_slimbus_clk.clkr,
+	[CODEC_I2S_MIC_OSR_SRC] = &codec_i2s_mic_osr_src.clkr,
+	[CODEC_I2S_MIC_OSR_CLK] = &codec_i2s_mic_osr_clk.clkr,
+	[CODEC_I2S_MIC_DIV_CLK] = &codec_i2s_mic_div_clk.clkr,
+	[CODEC_I2S_MIC_BIT_DIV_CLK] = &codec_i2s_mic_bit_div_clk.clkr,
+	[CODEC_I2S_MIC_BIT_CLK] = &codec_i2s_mic_bit_clk.clkr,
+	[SPARE_I2S_MIC_OSR_SRC] = &spare_i2s_mic_osr_src.clkr,
+	[SPARE_I2S_MIC_OSR_CLK] = &spare_i2s_mic_osr_clk.clkr,
+	[SPARE_I2S_MIC_DIV_CLK] = &spare_i2s_mic_div_clk.clkr,
+	[SPARE_I2S_MIC_BIT_DIV_CLK] = &spare_i2s_mic_bit_div_clk.clkr,
+	[SPARE_I2S_MIC_BIT_CLK] = &spare_i2s_mic_bit_clk.clkr,
+	[CODEC_I2S_SPKR_OSR_SRC] = &codec_i2s_spkr_osr_src.clkr,
+	[CODEC_I2S_SPKR_OSR_CLK] = &codec_i2s_spkr_osr_clk.clkr,
+	[CODEC_I2S_SPKR_DIV_CLK] = &codec_i2s_spkr_div_clk.clkr,
+	[CODEC_I2S_SPKR_BIT_DIV_CLK] = &codec_i2s_spkr_bit_div_clk.clkr,
+	[CODEC_I2S_SPKR_BIT_CLK] = &codec_i2s_spkr_bit_clk.clkr,
+	[SPARE_I2S_SPKR_OSR_SRC] = &spare_i2s_spkr_osr_src.clkr,
+	[SPARE_I2S_SPKR_OSR_CLK] = &spare_i2s_spkr_osr_clk.clkr,
+	[SPARE_I2S_SPKR_DIV_CLK] = &spare_i2s_spkr_div_clk.clkr,
+	[SPARE_I2S_SPKR_BIT_DIV_CLK] = &spare_i2s_spkr_bit_div_clk.clkr,
+	[SPARE_I2S_SPKR_BIT_CLK] = &spare_i2s_spkr_bit_clk.clkr,
+};
+
+static const struct regmap_config lcc_msm8960_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0xfc,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc lcc_msm8960_desc = {
+	.config = &lcc_msm8960_regmap_config,
+	.clks = lcc_msm8960_clks,
+	.num_clks = ARRAY_SIZE(lcc_msm8960_clks),
+};
+
+static const struct of_device_id lcc_msm8960_match_table[] = {
+	{ .compatible = "qcom,lcc-msm8960" },
+	{ .compatible = "qcom,lcc-apq8064" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lcc_msm8960_match_table);
+
+static int lcc_msm8960_probe(struct platform_device *pdev)
+{
+	u32 val;
+	struct regmap *regmap;
+
+	regmap = qcom_cc_map(pdev, &lcc_msm8960_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	/* Use the correct frequency plan depending on speed of PLL4 */
+	regmap_read(regmap, 0x4, &val);
+	if (val == 0x12) {
+		slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
+		mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+		codec_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+		spare_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+		codec_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+		spare_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+		pcm_src.freq_tbl = clk_tbl_pcm_492;
+	}
+	/* Enable PLL4 source on the LPASS Primary PLL Mux */
+	regmap_write(regmap, 0xc4, 0x1);
+
+	return qcom_cc_really_probe(pdev, &lcc_msm8960_desc, regmap);
+}
+
+static int lcc_msm8960_remove(struct platform_device *pdev)
+{
+	qcom_cc_remove(pdev);
+	return 0;
+}
+
+static struct platform_driver lcc_msm8960_driver = {
+	.probe		= lcc_msm8960_probe,
+	.remove		= lcc_msm8960_remove,
+	.driver		= {
+		.name	= "lcc-msm8960",
+		.of_match_table = lcc_msm8960_match_table,
+	},
+};
+module_platform_driver(lcc_msm8960_driver);
+
+MODULE_DESCRIPTION("QCOM LCC MSM8960 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lcc-msm8960");
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index cbcddcc..05d7a0b 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -535,44 +535,44 @@
 	COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0,
 			RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
 			RK3288_CLKGATE_CON(1), 8, GFLAGS),
-	COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
+	COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(17), 0,
 			RK3288_CLKGATE_CON(1), 9, GFLAGS),
-	MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, 0,
+	MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(13), 8, 2, MFLAGS),
 	MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
 			RK3288_CLKSEL_CON(13), 15, 1, MFLAGS),
 	COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
 			RK3288_CLKSEL_CON(14), 0, 7, DFLAGS,
 			RK3288_CLKGATE_CON(1), 10, GFLAGS),
-	COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", 0,
+	COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(18), 0,
 			RK3288_CLKGATE_CON(1), 11, GFLAGS),
-	MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, 0,
+	MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(14), 8, 2, MFLAGS),
 	COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
 			RK3288_CLKSEL_CON(15), 0, 7, DFLAGS,
 			RK3288_CLKGATE_CON(1), 12, GFLAGS),
-	COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", 0,
+	COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(19), 0,
 			RK3288_CLKGATE_CON(1), 13, GFLAGS),
-	MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, 0,
+	MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(15), 8, 2, MFLAGS),
 	COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
 			RK3288_CLKSEL_CON(16), 0, 7, DFLAGS,
 			RK3288_CLKGATE_CON(1), 14, GFLAGS),
-	COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", 0,
+	COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(20), 0,
 			RK3288_CLKGATE_CON(1), 15, GFLAGS),
-	MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, 0,
+	MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(16), 8, 2, MFLAGS),
 	COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
 			RK3288_CLKSEL_CON(3), 0, 7, DFLAGS,
 			RK3288_CLKGATE_CON(2), 12, GFLAGS),
-	COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", 0,
+	COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(7), 0,
 			RK3288_CLKGATE_CON(2), 13, GFLAGS),
-	MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
+	MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
 
 	COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
@@ -598,7 +598,7 @@
 	GATE(0, "jtag", "ext_jtag", 0,
 			RK3288_CLKGATE_CON(4), 14, GFLAGS),
 
-	COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
+	COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
 			RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
 			RK3288_CLKGATE_CON(5), 14, GFLAGS),
 	COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
@@ -704,8 +704,8 @@
 
 	GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 0, RK3288_CLKGATE_CON(13), 10, GFLAGS),
 	GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 0, RK3288_CLKGATE_CON(13), 11, GFLAGS),
-	GATE(0, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
-	GATE(0, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
+	GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
+	GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
 	GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3288_CLKGATE_CON(5), 15, GFLAGS),
 
 	/* sclk_gpu gates */
@@ -805,6 +805,20 @@
 		rk3288_saved_cru_regs[i] =
 				readl_relaxed(rk3288_cru_base + reg_id);
 	}
+
+	/*
+	 * Switch PLLs other than DPLL (for SDRAM) to slow mode to
+	 * avoid crashes on resume. The Mask ROM on the system will
+	 * put APLL, CPLL, and GPLL into slow mode at resume time
+	 * anyway (which is why we restore them), but we might not
+	 * even make it to the Mask ROM if this isn't done at suspend
+	 * time.
+	 *
+	 * NOTE: only APLL truly matters here, but we'll do them all.
+	 */
+
+	writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON);
+
 	return 0;
 }
 
@@ -866,6 +880,14 @@
 		pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
 			__func__, PTR_ERR(clk));
 
+	/* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
+	clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
+	if (IS_ERR(clk))
+		pr_warn("%s: could not register clock pclk_wdt: %ld\n",
+			__func__, PTR_ERR(clk));
+	else
+		rockchip_clk_add_lookup(clk, PCLK_WDT);
+
 	rockchip_clk_register_plls(rk3288_pll_clks,
 				   ARRAY_SIZE(rk3288_pll_clks),
 				   RK3288_GRF_SOC_STATUS1);
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index f2c2ccc..454b02a 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -82,6 +82,26 @@
 	{},
 };
 
+static void exynos_audss_clk_teardown(void)
+{
+	int i;
+
+	for (i = EXYNOS_MOUT_AUDSS; i < EXYNOS_DOUT_SRP; i++) {
+		if (!IS_ERR(clk_table[i]))
+			clk_unregister_mux(clk_table[i]);
+	}
+
+	for (; i < EXYNOS_SRP_CLK; i++) {
+		if (!IS_ERR(clk_table[i]))
+			clk_unregister_divider(clk_table[i]);
+	}
+
+	for (; i < clk_data.clk_num; i++) {
+		if (!IS_ERR(clk_table[i]))
+			clk_unregister_gate(clk_table[i]);
+	}
+}
+
 /* register exynos_audss clocks */
 static int exynos_audss_clk_probe(struct platform_device *pdev)
 {
@@ -219,10 +239,7 @@
 	return 0;
 
 unregister:
-	for (i = 0; i < clk_data.clk_num; i++) {
-		if (!IS_ERR(clk_table[i]))
-			clk_unregister(clk_table[i]);
-	}
+	exynos_audss_clk_teardown();
 
 	if (!IS_ERR(epll))
 		clk_disable_unprepare(epll);
@@ -232,18 +249,13 @@
 
 static int exynos_audss_clk_remove(struct platform_device *pdev)
 {
-	int i;
-
 #ifdef CONFIG_PM_SLEEP
 	unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
 #endif
 
 	of_clk_del_provider(pdev->dev.of_node);
 
-	for (i = 0; i < clk_data.clk_num; i++) {
-		if (!IS_ERR(clk_table[i]))
-			clk_unregister(clk_table[i]);
-	}
+	exynos_audss_clk_teardown();
 
 	if (!IS_ERR(epll))
 		clk_disable_unprepare(epll);
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 6e6cca3..cc4c348 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -104,27 +104,6 @@
 #define PWR_CTRL1_USE_CORE1_WFI			(1 << 1)
 #define PWR_CTRL1_USE_CORE0_WFI			(1 << 0)
 
-/* list of PLLs to be registered */
-enum exynos3250_plls {
-	apll, mpll, vpll, upll,
-	nr_plls
-};
-
-/* list of PLLs in DMC block to be registered */
-enum exynos3250_dmc_plls {
-	bpll, epll,
-	nr_dmc_plls
-};
-
-static void __iomem *reg_base;
-static void __iomem *dmc_reg_base;
-
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos3250_clk_regs;
-
 static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
 	SRC_LEFTBUS,
 	DIV_LEFTBUS,
@@ -195,43 +174,6 @@
 	PWR_CTRL2,
 };
 
-static int exynos3250_clk_suspend(void)
-{
-	samsung_clk_save(reg_base, exynos3250_clk_regs,
-				ARRAY_SIZE(exynos3250_cmu_clk_regs));
-	return 0;
-}
-
-static void exynos3250_clk_resume(void)
-{
-	samsung_clk_restore(reg_base, exynos3250_clk_regs,
-				ARRAY_SIZE(exynos3250_cmu_clk_regs));
-}
-
-static struct syscore_ops exynos3250_clk_syscore_ops = {
-	.suspend = exynos3250_clk_suspend,
-	.resume = exynos3250_clk_resume,
-};
-
-static void exynos3250_clk_sleep_init(void)
-{
-	exynos3250_clk_regs =
-		samsung_clk_alloc_reg_dump(exynos3250_cmu_clk_regs,
-					   ARRAY_SIZE(exynos3250_cmu_clk_regs));
-	if (!exynos3250_clk_regs) {
-		pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-		goto err;
-	}
-
-	register_syscore_ops(&exynos3250_clk_syscore_ops);
-	return;
-err:
-	kfree(exynos3250_clk_regs);
-}
-#else
-static inline void exynos3250_clk_sleep_init(void) { }
-#endif
-
 /* list of all parent clock list */
 PNAME(mout_vpllsrc_p)		= { "fin_pll", };
 
@@ -782,18 +724,18 @@
 	{ /* sentinel */ }
 };
 
-static struct samsung_pll_clock exynos3250_plls[nr_plls] __initdata = {
-	[apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-			APLL_LOCK, APLL_CON0, NULL),
-	[mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
-			MPLL_LOCK, MPLL_CON0, NULL),
-	[vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
-			VPLL_LOCK, VPLL_CON0, NULL),
-	[upll] = PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
-			UPLL_LOCK, UPLL_CON0, NULL),
+static struct samsung_pll_clock exynos3250_plls[] __initdata = {
+	PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+		APLL_LOCK, APLL_CON0, exynos3250_pll_rates),
+	PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+			MPLL_LOCK, MPLL_CON0, exynos3250_pll_rates),
+	PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
+			VPLL_LOCK, VPLL_CON0, exynos3250_vpll_rates),
+	PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
+			UPLL_LOCK, UPLL_CON0, exynos3250_pll_rates),
 };
 
-static void __init exynos3_core_down_clock(void)
+static void __init exynos3_core_down_clock(void __iomem *reg_base)
 {
 	unsigned int tmp;
 
@@ -814,38 +756,31 @@
 	__raw_writel(0x0, reg_base + PWR_CTRL2);
 }
 
+static struct samsung_cmu_info cmu_info __initdata = {
+	.pll_clks		= exynos3250_plls,
+	.nr_pll_clks		= ARRAY_SIZE(exynos3250_plls),
+	.mux_clks		= mux_clks,
+	.nr_mux_clks		= ARRAY_SIZE(mux_clks),
+	.div_clks		= div_clks,
+	.nr_div_clks		= ARRAY_SIZE(div_clks),
+	.gate_clks		= gate_clks,
+	.nr_gate_clks		= ARRAY_SIZE(gate_clks),
+	.fixed_factor_clks	= fixed_factor_clks,
+	.nr_fixed_factor_clks	= ARRAY_SIZE(fixed_factor_clks),
+	.nr_clk_ids		= CLK_NR_CLKS,
+	.clk_regs		= exynos3250_cmu_clk_regs,
+	.nr_clk_regs		= ARRAY_SIZE(exynos3250_cmu_clk_regs),
+};
+
 static void __init exynos3250_cmu_init(struct device_node *np)
 {
 	struct samsung_clk_provider *ctx;
 
-	reg_base = of_iomap(np, 0);
-	if (!reg_base)
-		panic("%s: failed to map registers\n", __func__);
-
-	ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+	ctx = samsung_cmu_register_one(np, &cmu_info);
 	if (!ctx)
-		panic("%s: unable to allocate context.\n", __func__);
+		return;
 
-	samsung_clk_register_fixed_factor(ctx, fixed_factor_clks,
-					  ARRAY_SIZE(fixed_factor_clks));
-
-	exynos3250_plls[apll].rate_table = exynos3250_pll_rates;
-	exynos3250_plls[mpll].rate_table = exynos3250_pll_rates;
-	exynos3250_plls[vpll].rate_table = exynos3250_vpll_rates;
-	exynos3250_plls[upll].rate_table = exynos3250_pll_rates;
-
-	samsung_clk_register_pll(ctx, exynos3250_plls,
-					ARRAY_SIZE(exynos3250_plls), reg_base);
-
-	samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks));
-	samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks));
-	samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks));
-
-	exynos3_core_down_clock();
-
-	exynos3250_clk_sleep_init();
-
-	samsung_clk_of_add_provider(np, ctx);
+	exynos3_core_down_clock(ctx->reg_base);
 }
 CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
 
@@ -872,12 +807,6 @@
 #define EPLL_CON2		0x111c
 #define SRC_EPLL		0x1120
 
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos3250_dmc_clk_regs;
-
 static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
 	BPLL_LOCK,
 	BPLL_CON0,
@@ -899,43 +828,6 @@
 	SRC_EPLL,
 };
 
-static int exynos3250_dmc_clk_suspend(void)
-{
-	samsung_clk_save(dmc_reg_base, exynos3250_dmc_clk_regs,
-				ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-	return 0;
-}
-
-static void exynos3250_dmc_clk_resume(void)
-{
-	samsung_clk_restore(dmc_reg_base, exynos3250_dmc_clk_regs,
-				ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-}
-
-static struct syscore_ops exynos3250_dmc_clk_syscore_ops = {
-	.suspend = exynos3250_dmc_clk_suspend,
-	.resume = exynos3250_dmc_clk_resume,
-};
-
-static void exynos3250_dmc_clk_sleep_init(void)
-{
-	exynos3250_dmc_clk_regs =
-		samsung_clk_alloc_reg_dump(exynos3250_cmu_dmc_clk_regs,
-				   ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-	if (!exynos3250_dmc_clk_regs) {
-		pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-		goto err;
-	}
-
-	register_syscore_ops(&exynos3250_dmc_clk_syscore_ops);
-	return;
-err:
-	kfree(exynos3250_dmc_clk_regs);
-}
-#else
-static inline void exynos3250_dmc_clk_sleep_init(void) { }
-#endif
-
 PNAME(mout_epll_p)	= { "fin_pll", "fout_epll", };
 PNAME(mout_bpll_p)	= { "fin_pll", "fout_bpll", };
 PNAME(mout_mpll_mif_p)	= { "fin_pll", "sclk_mpll_mif", };
@@ -977,43 +869,28 @@
 	DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
 };
 
-static struct samsung_pll_clock exynos3250_dmc_plls[nr_dmc_plls] __initdata = {
-	[bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
-			BPLL_LOCK, BPLL_CON0, NULL),
-	[epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-			EPLL_LOCK, EPLL_CON0, NULL),
+static struct samsung_pll_clock exynos3250_dmc_plls[] __initdata = {
+	PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
+		BPLL_LOCK, BPLL_CON0, exynos3250_pll_rates),
+	PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+		EPLL_LOCK, EPLL_CON0, exynos3250_epll_rates),
+};
+
+static struct samsung_cmu_info dmc_cmu_info __initdata = {
+	.pll_clks		= exynos3250_dmc_plls,
+	.nr_pll_clks		= ARRAY_SIZE(exynos3250_dmc_plls),
+	.mux_clks		= dmc_mux_clks,
+	.nr_mux_clks		= ARRAY_SIZE(dmc_mux_clks),
+	.div_clks		= dmc_div_clks,
+	.nr_div_clks		= ARRAY_SIZE(dmc_div_clks),
+	.nr_clk_ids		= NR_CLKS_DMC,
+	.clk_regs		= exynos3250_cmu_dmc_clk_regs,
+	.nr_clk_regs		= ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs),
 };
 
 static void __init exynos3250_cmu_dmc_init(struct device_node *np)
 {
-	struct samsung_clk_provider *ctx;
-
-	dmc_reg_base = of_iomap(np, 0);
-	if (!dmc_reg_base)
-		panic("%s: failed to map registers\n", __func__);
-
-	ctx = samsung_clk_init(np, dmc_reg_base, NR_CLKS_DMC);
-	if (!ctx)
-		panic("%s: unable to allocate context.\n", __func__);
-
-	exynos3250_dmc_plls[bpll].rate_table = exynos3250_pll_rates;
-	exynos3250_dmc_plls[epll].rate_table = exynos3250_epll_rates;
-
-	pr_err("CLK registering epll bpll: %d, %d, %d, %d\n",
-			exynos3250_dmc_plls[bpll].rate_table[0].rate,
-			exynos3250_dmc_plls[bpll].rate_table[0].mdiv,
-			exynos3250_dmc_plls[bpll].rate_table[0].pdiv,
-			exynos3250_dmc_plls[bpll].rate_table[0].sdiv
-	      );
-	samsung_clk_register_pll(ctx, exynos3250_dmc_plls,
-				ARRAY_SIZE(exynos3250_dmc_plls), dmc_reg_base);
-
-	samsung_clk_register_mux(ctx, dmc_mux_clks, ARRAY_SIZE(dmc_mux_clks));
-	samsung_clk_register_div(ctx, dmc_div_clks, ARRAY_SIZE(dmc_div_clks));
-
-	exynos3250_dmc_clk_sleep_init();
-
-	samsung_clk_of_add_provider(np, ctx);
+	samsung_cmu_register_one(np, &dmc_cmu_info);
 }
 CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc",
 		exynos3250_cmu_dmc_init);
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 88e8c6b..51462e8 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -703,12 +703,12 @@
 
 /* list of divider clocks supported in all exynos4 soc's */
 static struct samsung_div_clock exynos4_div_clks[] __initdata = {
-	DIV(0, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
+	DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
 	DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
 	DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus",
 			CLKOUT_CMU_LEFTBUS, 8, 6),
 
-	DIV(0, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
+	DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
 	DIV(0, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
 	DIV(0, "div_clkout_rightbus", "mout_clkout_rightbus",
 			CLKOUT_CMU_RIGHTBUS, 8, 6),
@@ -781,10 +781,10 @@
 			CLK_SET_RATE_PARENT, 0),
 	DIV(0, "div_clkout_top", "mout_clkout_top", CLKOUT_CMU_TOP, 8, 6),
 
-	DIV(0, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
+	DIV(CLK_DIV_ACP, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
 	DIV(0, "div_acp_pclk", "div_acp", DIV_DMC0, 4, 3),
 	DIV(0, "div_dphy", "mout_dphy", DIV_DMC0, 8, 3),
-	DIV(0, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
+	DIV(CLK_DIV_DMC, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
 	DIV(0, "div_dmcd", "div_dmc", DIV_DMC0, 16, 3),
 	DIV(0, "div_dmcp", "div_dmcd", DIV_DMC0, 20, 3),
 	DIV(0, "div_pwi", "mout_pwi", DIV_DMC1, 8, 4),
@@ -829,7 +829,7 @@
 	DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
 						8, 3, CLK_GET_RATE_NOCACHE, 0),
 	DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
-	DIV(0, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
+	DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
 	DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
 };
 
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
index 2123fc2..6c78b09 100644
--- a/drivers/clk/samsung/clk-exynos4415.c
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -113,19 +113,6 @@
 #define DIV_CPU0		0x14500
 #define DIV_CPU1		0x14504
 
-enum exynos4415_plls {
-	apll, epll, g3d_pll, isp_pll, disp_pll,
-	nr_plls,
-};
-
-static struct samsung_clk_provider *exynos4415_ctx;
-
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4415_clk_regs;
-
 static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
 	SRC_LEFTBUS,
 	DIV_LEFTBUS,
@@ -219,41 +206,6 @@
 	DIV_CPU1,
 };
 
-static int exynos4415_clk_suspend(void)
-{
-	samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs,
-				ARRAY_SIZE(exynos4415_cmu_clk_regs));
-
-	return 0;
-}
-
-static void exynos4415_clk_resume(void)
-{
-	samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs,
-				ARRAY_SIZE(exynos4415_cmu_clk_regs));
-}
-
-static struct syscore_ops exynos4415_clk_syscore_ops = {
-	.suspend = exynos4415_clk_suspend,
-	.resume = exynos4415_clk_resume,
-};
-
-static void exynos4415_clk_sleep_init(void)
-{
-	exynos4415_clk_regs =
-		samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs,
-					ARRAY_SIZE(exynos4415_cmu_clk_regs));
-	if (!exynos4415_clk_regs) {
-		pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-		return;
-	}
-
-	register_syscore_ops(&exynos4415_clk_syscore_ops);
-}
-#else
-static inline void exynos4415_clk_sleep_init(void) { }
-#endif
-
 /* list of all parent clock list */
 PNAME(mout_g3d_pllsrc_p)	= { "fin_pll", };
 
@@ -959,56 +911,40 @@
 	{ /* sentinel */ }
 };
 
-static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = {
-	[apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-			APLL_LOCK, APLL_CON0, NULL),
-	[epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-			EPLL_LOCK, EPLL_CON0, NULL),
-	[g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll",
-			"mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL),
-	[isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
-			ISP_PLL_LOCK, ISP_PLL_CON0, NULL),
-	[disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
-			"fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL),
+static struct samsung_pll_clock exynos4415_plls[] __initdata = {
+	PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+		APLL_LOCK, APLL_CON0, exynos4415_pll_rates),
+	PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+		EPLL_LOCK, EPLL_CON0, exynos4415_epll_rates),
+	PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "mout_g3d_pllsrc",
+		G3D_PLL_LOCK, G3D_PLL_CON0, exynos4415_pll_rates),
+	PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
+		ISP_PLL_LOCK, ISP_PLL_CON0, exynos4415_pll_rates),
+	PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
+		"fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, exynos4415_pll_rates),
+};
+
+static struct samsung_cmu_info cmu_info __initdata = {
+	.pll_clks		= exynos4415_plls,
+	.nr_pll_clks		= ARRAY_SIZE(exynos4415_plls),
+	.mux_clks		= exynos4415_mux_clks,
+	.nr_mux_clks		= ARRAY_SIZE(exynos4415_mux_clks),
+	.div_clks		= exynos4415_div_clks,
+	.nr_div_clks		= ARRAY_SIZE(exynos4415_div_clks),
+	.gate_clks		= exynos4415_gate_clks,
+	.nr_gate_clks		= ARRAY_SIZE(exynos4415_gate_clks),
+	.fixed_clks		= exynos4415_fixed_rate_clks,
+	.nr_fixed_clks		= ARRAY_SIZE(exynos4415_fixed_rate_clks),
+	.fixed_factor_clks	= exynos4415_fixed_factor_clks,
+	.nr_fixed_factor_clks	= ARRAY_SIZE(exynos4415_fixed_factor_clks),
+	.nr_clk_ids		= CLK_NR_CLKS,
+	.clk_regs		= exynos4415_cmu_clk_regs,
+	.nr_clk_regs		= ARRAY_SIZE(exynos4415_cmu_clk_regs),
 };
 
 static void __init exynos4415_cmu_init(struct device_node *np)
 {
-	void __iomem *reg_base;
-
-	reg_base = of_iomap(np, 0);
-	if (!reg_base)
-		panic("%s: failed to map registers\n", __func__);
-
-	exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
-	if (!exynos4415_ctx)
-		panic("%s: unable to allocate context.\n", __func__);
-
-	exynos4415_plls[apll].rate_table = exynos4415_pll_rates;
-	exynos4415_plls[epll].rate_table = exynos4415_epll_rates;
-	exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates;
-	exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates;
-	exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates;
-
-	samsung_clk_register_fixed_factor(exynos4415_ctx,
-				exynos4415_fixed_factor_clks,
-				ARRAY_SIZE(exynos4415_fixed_factor_clks));
-	samsung_clk_register_fixed_rate(exynos4415_ctx,
-				exynos4415_fixed_rate_clks,
-				ARRAY_SIZE(exynos4415_fixed_rate_clks));
-
-	samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls,
-				ARRAY_SIZE(exynos4415_plls), reg_base);
-	samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks,
-				ARRAY_SIZE(exynos4415_mux_clks));
-	samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks,
-				ARRAY_SIZE(exynos4415_div_clks));
-	samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks,
-				ARRAY_SIZE(exynos4415_gate_clks));
-
-	exynos4415_clk_sleep_init();
-
-	samsung_clk_of_add_provider(np, exynos4415_ctx);
+	samsung_cmu_register_one(np, &cmu_info);
 }
 CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
 
@@ -1027,16 +963,6 @@
 #define SRC_DMC			0x300
 #define DIV_DMC1		0x504
 
-enum exynos4415_dmc_plls {
-	mpll, bpll,
-	nr_dmc_plls,
-};
-
-static struct samsung_clk_provider *exynos4415_dmc_ctx;
-
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs;
-
 static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
 	MPLL_LOCK,
 	MPLL_CON0,
@@ -1050,42 +976,6 @@
 	DIV_DMC1,
 };
 
-static int exynos4415_dmc_clk_suspend(void)
-{
-	samsung_clk_save(exynos4415_dmc_ctx->reg_base,
-				exynos4415_dmc_clk_regs,
-				ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-	return 0;
-}
-
-static void exynos4415_dmc_clk_resume(void)
-{
-	samsung_clk_restore(exynos4415_dmc_ctx->reg_base,
-				exynos4415_dmc_clk_regs,
-				ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-}
-
-static struct syscore_ops exynos4415_dmc_clk_syscore_ops = {
-	.suspend = exynos4415_dmc_clk_suspend,
-	.resume = exynos4415_dmc_clk_resume,
-};
-
-static void exynos4415_dmc_clk_sleep_init(void)
-{
-	exynos4415_dmc_clk_regs =
-		samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs,
-				ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-	if (!exynos4415_dmc_clk_regs) {
-		pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-		return;
-	}
-
-	register_syscore_ops(&exynos4415_dmc_clk_syscore_ops);
-}
-#else
-static inline void exynos4415_dmc_clk_sleep_init(void) { }
-#endif /* CONFIG_PM_SLEEP */
-
 PNAME(mout_mpll_p)		= { "fin_pll", "fout_mpll", };
 PNAME(mout_bpll_p)		= { "fin_pll", "fout_bpll", };
 PNAME(mbpll_p)			= { "mout_mpll", "mout_bpll", };
@@ -1107,38 +997,28 @@
 	DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
 };
 
-static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = {
-	[mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
-		MPLL_LOCK, MPLL_CON0, NULL),
-	[bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
-		BPLL_LOCK, BPLL_CON0, NULL),
+static struct samsung_pll_clock exynos4415_dmc_plls[] __initdata = {
+	PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
+		MPLL_LOCK, MPLL_CON0, exynos4415_pll_rates),
+	PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
+		BPLL_LOCK, BPLL_CON0, exynos4415_pll_rates),
+};
+
+static struct samsung_cmu_info cmu_dmc_info __initdata = {
+	.pll_clks		= exynos4415_dmc_plls,
+	.nr_pll_clks		= ARRAY_SIZE(exynos4415_dmc_plls),
+	.mux_clks		= exynos4415_dmc_mux_clks,
+	.nr_mux_clks		= ARRAY_SIZE(exynos4415_dmc_mux_clks),
+	.div_clks		= exynos4415_dmc_div_clks,
+	.nr_div_clks		= ARRAY_SIZE(exynos4415_dmc_div_clks),
+	.nr_clk_ids		= NR_CLKS_DMC,
+	.clk_regs		= exynos4415_cmu_dmc_clk_regs,
+	.nr_clk_regs		= ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs),
 };
 
 static void __init exynos4415_cmu_dmc_init(struct device_node *np)
 {
-	void __iomem *reg_base;
-
-	reg_base = of_iomap(np, 0);
-	if (!reg_base)
-		panic("%s: failed to map registers\n", __func__);
-
-	exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC);
-	if (!exynos4415_dmc_ctx)
-		panic("%s: unable to allocate context.\n", __func__);
-
-	exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates;
-	exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates;
-
-	samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls,
-				ARRAY_SIZE(exynos4415_dmc_plls), reg_base);
-	samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks,
-				ARRAY_SIZE(exynos4415_dmc_mux_clks));
-	samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks,
-				ARRAY_SIZE(exynos4415_dmc_div_clks));
-
-	exynos4415_dmc_clk_sleep_init();
-
-	samsung_clk_of_add_provider(np, exynos4415_dmc_ctx);
+	samsung_cmu_register_one(np, &cmu_dmc_info);
 }
 CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc",
 		exynos4415_cmu_dmc_init);
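
Another effect visible in the exynos3250/4415 conversions above is that PLL rate tables are now attached in the array initializer instead of being patched in at runtime, which is what allowed the index enums (apll, epll, ...) to go away. Below is a hedged sketch of that pattern, mirroring the PLL()/PLL_35XX_RATE() usage seen in these hunks; the frequencies, divider values and FOO_* names are illustrative only, not taken from any datasheet.

/* Illustrative rate table: fout = fin * mdiv / (pdiv << sdiv), fin = 24 MHz. */
static struct samsung_pll_rate_table foo_pll_rates[] __initdata = {
	PLL_35XX_RATE(800000000, 200, 3, 1),
	PLL_35XX_RATE(400000000, 200, 3, 2),
	{ /* sentinel */ },
};

static struct samsung_pll_clock foo_plls[] __initdata = {
	/* no index needed any more: the rate table rides along in the entry */
	PLL(pll_35xx, CLK_FOUT_FOO_PLL, "fout_foo_pll", "fin_pll",
		FOO_PLL_LOCK, FOO_PLL_CON0, foo_pll_rates),
};
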
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index ea4483b..03d36e8 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -34,6 +34,7 @@
 #define DIV_TOPC0		0x0600
 #define DIV_TOPC1		0x0604
 #define DIV_TOPC3		0x060C
+#define ENABLE_ACLK_TOPC1	0x0804
 
 static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
 	FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
@@ -45,6 +46,7 @@
 };
 
 /* List of parent clocks for Muxes in CMU_TOPC */
+PNAME(mout_aud_pll_ctrl_p)	= { "fin_pll", "fout_aud_pll" };
 PNAME(mout_bus0_pll_ctrl_p)	= { "fin_pll", "fout_bus0_pll" };
 PNAME(mout_bus1_pll_ctrl_p)	= { "fin_pll", "fout_bus1_pll" };
 PNAME(mout_cc_pll_ctrl_p)	= { "fin_pll", "fout_cc_pll" };
@@ -104,9 +106,11 @@
 
 	MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
 		MUX_SEL_TOPC1, 16, 1),
+	MUX(0, "mout_aud_pll_ctrl", mout_aud_pll_ctrl_p, MUX_SEL_TOPC1, 0, 1),
 
 	MUX(0, "mout_aclk_ccore_133", mout_topc_group2,	MUX_SEL_TOPC2, 4, 2),
 
+	MUX(0, "mout_aclk_mscl_532", mout_topc_group2, MUX_SEL_TOPC3, 20, 2),
 	MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
 };
 
@@ -114,6 +118,8 @@
 	DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
 		DIV_TOPC0, 4, 4),
 
+	DIV(DOUT_ACLK_MSCL_532, "dout_aclk_mscl_532", "mout_aclk_mscl_532",
+		DIV_TOPC1, 20, 4),
 	DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
 		DIV_TOPC1, 24, 4),
 
@@ -125,6 +131,18 @@
 		DIV_TOPC3, 12, 3),
 	DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
 		DIV_TOPC3, 16, 3),
+	DIV(DOUT_SCLK_AUD_PLL, "dout_sclk_aud_pll", "mout_aud_pll_ctrl",
+		DIV_TOPC3, 28, 3),
+};
+
+static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = {
+	PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
+	{},
+};
+
+static struct samsung_gate_clock topc_gate_clks[] __initdata = {
+	GATE(ACLK_MSCL_532, "aclk_mscl_532", "dout_aclk_mscl_532",
+		ENABLE_ACLK_TOPC1, 20, 0, 0),
 };
 
 static struct samsung_pll_clock topc_pll_clks[] __initdata = {
@@ -136,8 +154,8 @@
 		BUS1_DPLL_CON0, NULL),
 	PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK,
 		MFC_PLL_CON0, NULL),
-	PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
-		AUD_PLL_CON0, NULL),
+	PLL(pll_1460x, FOUT_AUD_PLL, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
+		AUD_PLL_CON0, pll1460x_24mhz_tbl),
 };
 
 static struct samsung_cmu_info topc_cmu_info __initdata = {
@@ -147,6 +165,8 @@
 	.nr_mux_clks		= ARRAY_SIZE(topc_mux_clks),
 	.div_clks		= topc_div_clks,
 	.nr_div_clks		= ARRAY_SIZE(topc_div_clks),
+	.gate_clks		= topc_gate_clks,
+	.nr_gate_clks		= ARRAY_SIZE(topc_gate_clks),
 	.fixed_factor_clks	= topc_fixed_factor_clks,
 	.nr_fixed_factor_clks	= ARRAY_SIZE(topc_fixed_factor_clks),
 	.nr_clk_ids		= TOPC_NR_CLK,
@@ -166,9 +186,18 @@
 #define MUX_SEL_TOP00			0x0200
 #define MUX_SEL_TOP01			0x0204
 #define MUX_SEL_TOP03			0x020C
+#define MUX_SEL_TOP0_PERIC0		0x0230
+#define MUX_SEL_TOP0_PERIC1		0x0234
+#define MUX_SEL_TOP0_PERIC2		0x0238
 #define MUX_SEL_TOP0_PERIC3		0x023C
 #define DIV_TOP03			0x060C
+#define DIV_TOP0_PERIC0			0x0630
+#define DIV_TOP0_PERIC1			0x0634
+#define DIV_TOP0_PERIC2			0x0638
 #define DIV_TOP0_PERIC3			0x063C
+#define ENABLE_SCLK_TOP0_PERIC0		0x0A30
+#define ENABLE_SCLK_TOP0_PERIC1		0x0A34
+#define ENABLE_SCLK_TOP0_PERIC2		0x0A38
 #define ENABLE_SCLK_TOP0_PERIC3		0x0A3C
 
 /* List of parent clocks for Muxes in CMU_TOP0 */
@@ -176,6 +205,7 @@
 PNAME(mout_bus1_pll_p)	= { "fin_pll", "dout_sclk_bus1_pll" };
 PNAME(mout_cc_pll_p)	= { "fin_pll", "dout_sclk_cc_pll" };
 PNAME(mout_mfc_pll_p)	= { "fin_pll", "dout_sclk_mfc_pll" };
+PNAME(mout_aud_pll_p)	= { "fin_pll", "dout_sclk_aud_pll" };
 
 PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll",
 	"ffac_top0_bus0_pll_div2"};
@@ -189,18 +219,34 @@
 PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
 	"mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
 	"mout_top0_half_mfc_pll"};
+PNAME(mout_top0_group3) = {"ioclk_audiocdclk0",
+	"ioclk_audiocdclk1", "ioclk_spdif_extclk",
+	"mout_top0_aud_pll", "mout_top0_half_bus0_pll",
+	"mout_top0_half_bus1_pll"};
+PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll",
+	"mout_top0_half_bus0_pll", "mout_top0_half_bus1_pll"};
 
 static unsigned long top0_clk_regs[] __initdata = {
 	MUX_SEL_TOP00,
 	MUX_SEL_TOP01,
 	MUX_SEL_TOP03,
+	MUX_SEL_TOP0_PERIC0,
+	MUX_SEL_TOP0_PERIC1,
+	MUX_SEL_TOP0_PERIC2,
 	MUX_SEL_TOP0_PERIC3,
 	DIV_TOP03,
+	DIV_TOP0_PERIC0,
+	DIV_TOP0_PERIC1,
+	DIV_TOP0_PERIC2,
 	DIV_TOP0_PERIC3,
+	ENABLE_SCLK_TOP0_PERIC0,
+	ENABLE_SCLK_TOP0_PERIC1,
+	ENABLE_SCLK_TOP0_PERIC2,
 	ENABLE_SCLK_TOP0_PERIC3,
 };
 
 static struct samsung_mux_clock top0_mux_clks[] __initdata = {
+	MUX(0, "mout_top0_aud_pll", mout_aud_pll_p, MUX_SEL_TOP00, 0, 1),
 	MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
 	MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
 	MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
@@ -218,10 +264,20 @@
 	MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
 	MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2),
 
+	MUX(0, "mout_sclk_spdif", mout_top0_group3, MUX_SEL_TOP0_PERIC0, 4, 3),
+	MUX(0, "mout_sclk_pcm1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 8, 2),
+	MUX(0, "mout_sclk_i2s1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 20, 2),
+
+	MUX(0, "mout_sclk_spi1", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 8, 2),
+	MUX(0, "mout_sclk_spi0", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 20, 2),
+
+	MUX(0, "mout_sclk_spi3", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 8, 2),
+	MUX(0, "mout_sclk_spi2", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 20, 2),
 	MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2),
 	MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2),
 	MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2),
 	MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2),
+	MUX(0, "mout_sclk_spi4", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 20, 2),
 };
 
 static struct samsung_div_clock top0_div_clks[] __initdata = {
@@ -230,13 +286,40 @@
 	DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
 		DIV_TOP03, 20, 6),
 
+	DIV(0, "dout_sclk_spdif", "mout_sclk_spdif", DIV_TOP0_PERIC0, 4, 4),
+	DIV(0, "dout_sclk_pcm1", "mout_sclk_pcm1", DIV_TOP0_PERIC0, 8, 12),
+	DIV(0, "dout_sclk_i2s1", "mout_sclk_i2s1", DIV_TOP0_PERIC0, 20, 10),
+
+	DIV(0, "dout_sclk_spi1", "mout_sclk_spi1", DIV_TOP0_PERIC1, 8, 12),
+	DIV(0, "dout_sclk_spi0", "mout_sclk_spi0", DIV_TOP0_PERIC1, 20, 12),
+
+	DIV(0, "dout_sclk_spi3", "mout_sclk_spi3", DIV_TOP0_PERIC2, 8, 12),
+	DIV(0, "dout_sclk_spi2", "mout_sclk_spi2", DIV_TOP0_PERIC2, 20, 12),
+
 	DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4),
 	DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4),
 	DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4),
 	DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4),
+	DIV(0, "dout_sclk_spi4", "mout_sclk_spi4", DIV_TOP0_PERIC3, 20, 12),
 };
 
 static struct samsung_gate_clock top0_gate_clks[] __initdata = {
+	GATE(CLK_SCLK_SPDIF, "sclk_spdif", "dout_sclk_spdif",
+		ENABLE_SCLK_TOP0_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_PCM1, "sclk_pcm1", "dout_sclk_pcm1",
+		ENABLE_SCLK_TOP0_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_I2S1, "sclk_i2s1", "dout_sclk_i2s1",
+		ENABLE_SCLK_TOP0_PERIC0, 20, CLK_SET_RATE_PARENT, 0),
+
+	GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_sclk_spi1",
+		ENABLE_SCLK_TOP0_PERIC1, 8, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_sclk_spi0",
+		ENABLE_SCLK_TOP0_PERIC1, 20, CLK_SET_RATE_PARENT, 0),
+
+	GATE(CLK_SCLK_SPI3, "sclk_spi3", "dout_sclk_spi3",
+		ENABLE_SCLK_TOP0_PERIC2, 8, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_sclk_spi2",
+		ENABLE_SCLK_TOP0_PERIC2, 20, CLK_SET_RATE_PARENT, 0),
 	GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3",
 		ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0),
 	GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2",
@@ -245,6 +328,8 @@
 		ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0),
 	GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0",
 		ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0),
+	GATE(CLK_SCLK_SPI4, "sclk_spi4", "dout_sclk_spi4",
+		ENABLE_SCLK_TOP0_PERIC3, 20, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
@@ -343,6 +428,8 @@
 	MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),
 
 	MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),
+	MUX(0, "mout_sclk_usbdrd300", mout_top1_group1,
+		MUX_SEL_TOP1_FSYS0, 28, 2),
 
 	MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
 	MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
@@ -356,6 +443,8 @@
 
 	DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
 		DIV_TOP1_FSYS0, 24, 4),
+	DIV(0, "dout_sclk_usbdrd300", "mout_sclk_usbdrd300",
+		DIV_TOP1_FSYS0, 28, 4),
 
 	DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
 		DIV_TOP1_FSYS1, 24, 4),
@@ -366,6 +455,8 @@
 static struct samsung_gate_clock top1_gate_clks[] __initdata = {
 	GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
 		ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),
+	GATE(0, "sclk_usbdrd300", "dout_sclk_usbdrd300",
+		ENABLE_SCLK_TOP1_FSYS0, 28, 0, 0),
 
 	GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
 		ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
@@ -514,6 +605,7 @@
 /* Register Offset definitions for CMU_PERIC1 (0x14C80000) */
 #define MUX_SEL_PERIC10			0x0200
 #define MUX_SEL_PERIC11			0x0204
+#define MUX_SEL_PERIC12			0x0208
 #define ENABLE_PCLK_PERIC1		0x0900
 #define ENABLE_SCLK_PERIC10		0x0A00
 
@@ -525,10 +617,16 @@
 PNAME(mout_sclk_uart1_p)	= { "fin_pll", "sclk_uart1" };
 PNAME(mout_sclk_uart2_p)	= { "fin_pll", "sclk_uart2" };
 PNAME(mout_sclk_uart3_p)	= { "fin_pll", "sclk_uart3" };
+PNAME(mout_sclk_spi0_p)		= { "fin_pll", "sclk_spi0" };
+PNAME(mout_sclk_spi1_p)		= { "fin_pll", "sclk_spi1" };
+PNAME(mout_sclk_spi2_p)		= { "fin_pll", "sclk_spi2" };
+PNAME(mout_sclk_spi3_p)		= { "fin_pll", "sclk_spi3" };
+PNAME(mout_sclk_spi4_p)		= { "fin_pll", "sclk_spi4" };
 
 static unsigned long peric1_clk_regs[] __initdata = {
 	MUX_SEL_PERIC10,
 	MUX_SEL_PERIC11,
+	MUX_SEL_PERIC12,
 	ENABLE_PCLK_PERIC1,
 	ENABLE_SCLK_PERIC10,
 };
@@ -537,6 +635,16 @@
 	MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
 		MUX_SEL_PERIC10, 0, 1),
 
+	MUX_F(0, "mout_sclk_spi0_user", mout_sclk_spi0_p,
+		MUX_SEL_PERIC11, 0, 1, CLK_SET_RATE_PARENT, 0),
+	MUX_F(0, "mout_sclk_spi1_user", mout_sclk_spi1_p,
+		MUX_SEL_PERIC11, 4, 1, CLK_SET_RATE_PARENT, 0),
+	MUX_F(0, "mout_sclk_spi2_user", mout_sclk_spi2_p,
+		MUX_SEL_PERIC11, 8, 1, CLK_SET_RATE_PARENT, 0),
+	MUX_F(0, "mout_sclk_spi3_user", mout_sclk_spi3_p,
+		MUX_SEL_PERIC11, 12, 1, CLK_SET_RATE_PARENT, 0),
+	MUX_F(0, "mout_sclk_spi4_user", mout_sclk_spi4_p,
+		MUX_SEL_PERIC11, 16, 1, CLK_SET_RATE_PARENT, 0),
 	MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
 		MUX_SEL_PERIC11, 20, 1),
 	MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
@@ -562,6 +670,22 @@
 		ENABLE_PCLK_PERIC1, 10, 0, 0),
 	GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user",
 		ENABLE_PCLK_PERIC1, 11, 0, 0),
+	GATE(PCLK_SPI0, "pclk_spi0", "mout_aclk_peric1_66_user",
+		ENABLE_PCLK_PERIC1, 12, 0, 0),
+	GATE(PCLK_SPI1, "pclk_spi1", "mout_aclk_peric1_66_user",
+		ENABLE_PCLK_PERIC1, 13, 0, 0),
+	GATE(PCLK_SPI2, "pclk_spi2", "mout_aclk_peric1_66_user",
+		ENABLE_PCLK_PERIC1, 14, 0, 0),
+	GATE(PCLK_SPI3, "pclk_spi3", "mout_aclk_peric1_66_user",
+		ENABLE_PCLK_PERIC1, 15, 0, 0),
+	GATE(PCLK_SPI4, "pclk_spi4", "mout_aclk_peric1_66_user",
+		ENABLE_PCLK_PERIC1, 16, 0, 0),
+	GATE(PCLK_I2S1, "pclk_i2s1", "mout_aclk_peric1_66_user",
+		ENABLE_PCLK_PERIC1, 17, CLK_SET_RATE_PARENT, 0),
+	GATE(PCLK_PCM1, "pclk_pcm1", "mout_aclk_peric1_66_user",
+		ENABLE_PCLK_PERIC1, 18, 0, 0),
+	GATE(PCLK_SPDIF, "pclk_spdif", "mout_aclk_peric1_66_user",
+		ENABLE_PCLK_PERIC1, 19, 0, 0),
 
 	GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user",
 		ENABLE_SCLK_PERIC10, 9, 0, 0),
@@ -569,6 +693,22 @@
 		ENABLE_SCLK_PERIC10, 10, 0, 0),
 	GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user",
 		ENABLE_SCLK_PERIC10, 11, 0, 0),
+	GATE(SCLK_SPI0, "sclk_spi0_user", "mout_sclk_spi0_user",
+		ENABLE_SCLK_PERIC10, 12, CLK_SET_RATE_PARENT, 0),
+	GATE(SCLK_SPI1, "sclk_spi1_user", "mout_sclk_spi1_user",
+		ENABLE_SCLK_PERIC10, 13, CLK_SET_RATE_PARENT, 0),
+	GATE(SCLK_SPI2, "sclk_spi2_user", "mout_sclk_spi2_user",
+		ENABLE_SCLK_PERIC10, 14, CLK_SET_RATE_PARENT, 0),
+	GATE(SCLK_SPI3, "sclk_spi3_user", "mout_sclk_spi3_user",
+		ENABLE_SCLK_PERIC10, 15, CLK_SET_RATE_PARENT, 0),
+	GATE(SCLK_SPI4, "sclk_spi4_user", "mout_sclk_spi4_user",
+		ENABLE_SCLK_PERIC10, 16, CLK_SET_RATE_PARENT, 0),
+	GATE(SCLK_I2S1, "sclk_i2s1_user", "sclk_i2s1",
+		ENABLE_SCLK_PERIC10, 17, CLK_SET_RATE_PARENT, 0),
+	GATE(SCLK_PCM1, "sclk_pcm1_user", "sclk_pcm1",
+		ENABLE_SCLK_PERIC10, 18, CLK_SET_RATE_PARENT, 0),
+	GATE(SCLK_SPDIF, "sclk_spdif_user", "sclk_spdif",
+		ENABLE_SCLK_PERIC10, 19, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_cmu_info peric1_cmu_info __initdata = {
@@ -647,7 +787,12 @@
 /* Register Offset definitions for CMU_FSYS0 (0x10E90000) */
 #define MUX_SEL_FSYS00			0x0200
 #define MUX_SEL_FSYS01			0x0204
+#define MUX_SEL_FSYS02			0x0208
+#define ENABLE_ACLK_FSYS00		0x0800
 #define ENABLE_ACLK_FSYS01		0x0804
+#define ENABLE_SCLK_FSYS01		0x0A04
+#define ENABLE_SCLK_FSYS02		0x0A08
+#define ENABLE_SCLK_FSYS04		0x0A10
 
 /*
  * List of parent clocks for Muxes in CMU_FSYS0
@@ -655,10 +800,29 @@
 PNAME(mout_aclk_fsys0_200_p)	= { "fin_pll", "dout_aclk_fsys0_200" };
 PNAME(mout_sclk_mmc2_p)		= { "fin_pll", "sclk_mmc2" };
 
+PNAME(mout_sclk_usbdrd300_p)	= { "fin_pll", "sclk_usbdrd300" };
+PNAME(mout_phyclk_usbdrd300_udrd30_phyclk_p)	= { "fin_pll",
+				"phyclk_usbdrd300_udrd30_phyclock" };
+PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_p)	= { "fin_pll",
+				"phyclk_usbdrd300_udrd30_pipe_pclk" };
+
+/* fixed rate clocks used in the FSYS0 block */
+static struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
+	FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL,
+		CLK_IS_ROOT, 60000000),
+	FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL,
+		CLK_IS_ROOT, 125000000),
+};
+
 static unsigned long fsys0_clk_regs[] __initdata = {
 	MUX_SEL_FSYS00,
 	MUX_SEL_FSYS01,
+	MUX_SEL_FSYS02,
+	ENABLE_ACLK_FSYS00,
 	ENABLE_ACLK_FSYS01,
+	ENABLE_SCLK_FSYS01,
+	ENABLE_SCLK_FSYS02,
+	ENABLE_SCLK_FSYS04,
 };
 
 static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
@@ -666,11 +830,49 @@
 		MUX_SEL_FSYS00, 24, 1),
 
 	MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
+	MUX(0, "mout_sclk_usbdrd300_user", mout_sclk_usbdrd300_p,
+		MUX_SEL_FSYS01, 28, 1),
+
+	MUX(0, "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
+		mout_phyclk_usbdrd300_udrd30_pipe_pclk_p,
+		MUX_SEL_FSYS02, 24, 1),
+	MUX(0, "mout_phyclk_usbdrd300_udrd30_phyclk_user",
+		mout_phyclk_usbdrd300_udrd30_phyclk_p,
+		MUX_SEL_FSYS02, 28, 1),
 };
 
 static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
+	GATE(ACLK_AXIUS_USBDRD30X_FSYS0X, "aclk_axius_usbdrd30x_fsys0x",
+		"mout_aclk_fsys0_200_user",
+		ENABLE_ACLK_FSYS00, 19, 0, 0),
+	GATE(ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys0_200_user",
+			ENABLE_ACLK_FSYS00, 3, 0, 0),
+	GATE(ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys0_200_user",
+			ENABLE_ACLK_FSYS00, 4, 0, 0),
+
+	GATE(ACLK_USBDRD300, "aclk_usbdrd300", "mout_aclk_fsys0_200_user",
+		ENABLE_ACLK_FSYS01, 29, 0, 0),
 	GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user",
 		ENABLE_ACLK_FSYS01, 31, 0, 0),
+
+	GATE(SCLK_USBDRD300_SUSPENDCLK, "sclk_usbdrd300_suspendclk",
+		"mout_sclk_usbdrd300_user",
+		ENABLE_SCLK_FSYS01, 4, 0, 0),
+	GATE(SCLK_USBDRD300_REFCLK, "sclk_usbdrd300_refclk", "fin_pll",
+		ENABLE_SCLK_FSYS01, 8, 0, 0),
+
+	GATE(PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER,
+		"phyclk_usbdrd300_udrd30_pipe_pclk_user",
+		"mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
+		ENABLE_SCLK_FSYS02, 24, 0, 0),
+	GATE(PHYCLK_USBDRD300_UDRD30_PHYCLK_USER,
+		"phyclk_usbdrd300_udrd30_phyclk_user",
+		"mout_phyclk_usbdrd300_udrd30_phyclk_user",
+		ENABLE_SCLK_FSYS02, 28, 0, 0),
+
+	GATE(OSCCLK_PHY_CLKOUT_USB30_PHY, "oscclk_phy_clkout_usb30_phy",
+		"fin_pll",
+		ENABLE_SCLK_FSYS04, 28, 0, 0),
 };
 
 static struct samsung_cmu_info fsys0_cmu_info __initdata = {
@@ -741,3 +943,205 @@
 
 CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
 	exynos7_clk_fsys1_init);
+
+#define MUX_SEL_MSCL			0x0200
+#define DIV_MSCL			0x0600
+#define ENABLE_ACLK_MSCL		0x0800
+#define ENABLE_PCLK_MSCL		0x0900
+
+/* List of parent clocks for Muxes in CMU_MSCL */
+PNAME(mout_aclk_mscl_532_user_p)	= { "fin_pll", "aclk_mscl_532" };
+
+static unsigned long mscl_clk_regs[] __initdata = {
+	MUX_SEL_MSCL,
+	DIV_MSCL,
+	ENABLE_ACLK_MSCL,
+	ENABLE_PCLK_MSCL,
+};
+
+static struct samsung_mux_clock mscl_mux_clks[] __initdata = {
+	MUX(USERMUX_ACLK_MSCL_532, "usermux_aclk_mscl_532",
+		mout_aclk_mscl_532_user_p, MUX_SEL_MSCL, 0, 1),
+};
+static struct samsung_div_clock mscl_div_clks[] __initdata = {
+	DIV(DOUT_PCLK_MSCL, "dout_pclk_mscl", "usermux_aclk_mscl_532",
+			DIV_MSCL, 0, 3),
+};
+static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
+
+	GATE(ACLK_MSCL_0, "aclk_mscl_0", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 31, 0, 0),
+	GATE(ACLK_MSCL_1, "aclk_mscl_1", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 30, 0, 0),
+	GATE(ACLK_JPEG, "aclk_jpeg", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 29, 0, 0),
+	GATE(ACLK_G2D, "aclk_g2d", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 28, 0, 0),
+	GATE(ACLK_LH_ASYNC_SI_MSCL_0, "aclk_lh_async_si_mscl_0",
+			"usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 27, 0, 0),
+	GATE(ACLK_LH_ASYNC_SI_MSCL_1, "aclk_lh_async_si_mscl_1",
+			"usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 26, 0, 0),
+	GATE(ACLK_XIU_MSCLX_0, "aclk_xiu_msclx_0", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 25, 0, 0),
+	GATE(ACLK_XIU_MSCLX_1, "aclk_xiu_msclx_1", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 24, 0, 0),
+	GATE(ACLK_AXI2ACEL_BRIDGE, "aclk_axi2acel_bridge",
+			"usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 23, 0, 0),
+	GATE(ACLK_QE_MSCL_0, "aclk_qe_mscl_0", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 22, 0, 0),
+	GATE(ACLK_QE_MSCL_1, "aclk_qe_mscl_1", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 21, 0, 0),
+	GATE(ACLK_QE_JPEG, "aclk_qe_jpeg", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 20, 0, 0),
+	GATE(ACLK_QE_G2D, "aclk_qe_g2d", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 19, 0, 0),
+	GATE(ACLK_PPMU_MSCL_0, "aclk_ppmu_mscl_0", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 18, 0, 0),
+	GATE(ACLK_PPMU_MSCL_1, "aclk_ppmu_mscl_1", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 17, 0, 0),
+	GATE(ACLK_MSCLNP_133, "aclk_msclnp_133", "usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 16, 0, 0),
+	GATE(ACLK_AHB2APB_MSCL0P, "aclk_ahb2apb_mscl0p",
+			"usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 15, 0, 0),
+	GATE(ACLK_AHB2APB_MSCL1P, "aclk_ahb2apb_mscl1p",
+			"usermux_aclk_mscl_532",
+			ENABLE_ACLK_MSCL, 14, 0, 0),
+
+	GATE(PCLK_MSCL_0, "pclk_mscl_0", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 31, 0, 0),
+	GATE(PCLK_MSCL_1, "pclk_mscl_1", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 30, 0, 0),
+	GATE(PCLK_JPEG, "pclk_jpeg", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 29, 0, 0),
+	GATE(PCLK_G2D, "pclk_g2d", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 28, 0, 0),
+	GATE(PCLK_QE_MSCL_0, "pclk_qe_mscl_0", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 27, 0, 0),
+	GATE(PCLK_QE_MSCL_1, "pclk_qe_mscl_1", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 26, 0, 0),
+	GATE(PCLK_QE_JPEG, "pclk_qe_jpeg", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 25, 0, 0),
+	GATE(PCLK_QE_G2D, "pclk_qe_g2d", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 24, 0, 0),
+	GATE(PCLK_PPMU_MSCL_0, "pclk_ppmu_mscl_0", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 23, 0, 0),
+	GATE(PCLK_PPMU_MSCL_1, "pclk_ppmu_mscl_1", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 22, 0, 0),
+	GATE(PCLK_AXI2ACEL_BRIDGE, "pclk_axi2acel_bridge", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 21, 0, 0),
+	GATE(PCLK_PMU_MSCL, "pclk_pmu_mscl", "dout_pclk_mscl",
+			ENABLE_PCLK_MSCL, 20, 0, 0),
+};
+
+static struct samsung_cmu_info mscl_cmu_info __initdata = {
+	.mux_clks		= mscl_mux_clks,
+	.nr_mux_clks		= ARRAY_SIZE(mscl_mux_clks),
+	.div_clks		= mscl_div_clks,
+	.nr_div_clks		= ARRAY_SIZE(mscl_div_clks),
+	.gate_clks		= mscl_gate_clks,
+	.nr_gate_clks		= ARRAY_SIZE(mscl_gate_clks),
+	.nr_clk_ids		= MSCL_NR_CLK,
+	.clk_regs		= mscl_clk_regs,
+	.nr_clk_regs		= ARRAY_SIZE(mscl_clk_regs),
+};
+
+static void __init exynos7_clk_mscl_init(struct device_node *np)
+{
+	samsung_cmu_register_one(np, &mscl_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_mscl, "samsung,exynos7-clock-mscl",
+		exynos7_clk_mscl_init);
+
+/* Register Offset definitions for CMU_AUD (0x114C0000) */
+#define	MUX_SEL_AUD			0x0200
+#define	DIV_AUD0			0x0600
+#define	DIV_AUD1			0x0604
+#define	ENABLE_ACLK_AUD			0x0800
+#define	ENABLE_PCLK_AUD			0x0900
+#define	ENABLE_SCLK_AUD			0x0A00
+
+/*
+ * List of parent clocks for Muxes in CMU_AUD
+ */
+PNAME(mout_aud_pll_user_p) = { "fin_pll", "fout_aud_pll" };
+PNAME(mout_aud_group_p) = { "dout_aud_cdclk", "ioclk_audiocdclk0" };
+
+static unsigned long aud_clk_regs[] __initdata = {
+	MUX_SEL_AUD,
+	DIV_AUD0,
+	DIV_AUD1,
+	ENABLE_ACLK_AUD,
+	ENABLE_PCLK_AUD,
+	ENABLE_SCLK_AUD,
+};
+
+static struct samsung_mux_clock aud_mux_clks[] __initdata = {
+	MUX(0, "mout_sclk_i2s", mout_aud_group_p, MUX_SEL_AUD, 12, 1),
+	MUX(0, "mout_sclk_pcm", mout_aud_group_p, MUX_SEL_AUD, 16, 1),
+	MUX(0, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 20, 1),
+};
+
+static struct samsung_div_clock aud_div_clks[] __initdata = {
+	DIV(0, "dout_aud_ca5", "mout_aud_pll_user", DIV_AUD0, 0, 4),
+	DIV(0, "dout_aclk_aud", "dout_aud_ca5", DIV_AUD0, 4, 4),
+	DIV(0, "dout_aud_pclk_dbg", "dout_aud_ca5", DIV_AUD0, 8, 4),
+
+	DIV(0, "dout_sclk_i2s", "mout_sclk_i2s", DIV_AUD1, 0, 4),
+	DIV(0, "dout_sclk_pcm", "mout_sclk_pcm", DIV_AUD1, 4, 8),
+	DIV(0, "dout_sclk_uart", "dout_aud_cdclk", DIV_AUD1, 12, 4),
+	DIV(0, "dout_sclk_slimbus", "dout_aud_cdclk", DIV_AUD1, 16, 5),
+	DIV(0, "dout_aud_cdclk", "mout_aud_pll_user", DIV_AUD1, 24, 4),
+};
+
+static struct samsung_gate_clock aud_gate_clks[] __initdata = {
+	GATE(SCLK_PCM, "sclk_pcm", "dout_sclk_pcm",
+			ENABLE_SCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
+	GATE(SCLK_I2S, "sclk_i2s", "dout_sclk_i2s",
+			ENABLE_SCLK_AUD, 28, CLK_SET_RATE_PARENT, 0),
+	GATE(0, "sclk_uart", "dout_sclk_uart", ENABLE_SCLK_AUD, 29, 0, 0),
+	GATE(0, "sclk_slimbus", "dout_sclk_slimbus",
+			ENABLE_SCLK_AUD, 30, 0, 0),
+
+	GATE(0, "pclk_dbg_aud", "dout_aud_pclk_dbg", ENABLE_PCLK_AUD, 19, 0, 0),
+	GATE(0, "pclk_gpio_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 20, 0, 0),
+	GATE(0, "pclk_wdt1", "dout_aclk_aud", ENABLE_PCLK_AUD, 22, 0, 0),
+	GATE(0, "pclk_wdt0", "dout_aclk_aud", ENABLE_PCLK_AUD, 23, 0, 0),
+	GATE(0, "pclk_slimbus", "dout_aclk_aud", ENABLE_PCLK_AUD, 24, 0, 0),
+	GATE(0, "pclk_uart", "dout_aclk_aud", ENABLE_PCLK_AUD, 25, 0, 0),
+	GATE(PCLK_PCM, "pclk_pcm", "dout_aclk_aud",
+			ENABLE_PCLK_AUD, 26, CLK_SET_RATE_PARENT, 0),
+	GATE(PCLK_I2S, "pclk_i2s", "dout_aclk_aud",
+			ENABLE_PCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
+	GATE(0, "pclk_timer", "dout_aclk_aud", ENABLE_PCLK_AUD, 28, 0, 0),
+	GATE(0, "pclk_smmu_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 31, 0, 0),
+
+	GATE(0, "aclk_smmu_aud", "dout_aclk_aud", ENABLE_ACLK_AUD, 27, 0, 0),
+	GATE(0, "aclk_acel_lh_async_si_top", "dout_aclk_aud",
+			 ENABLE_ACLK_AUD, 28, 0, 0),
+	GATE(ACLK_ADMA, "aclk_dmac", "dout_aclk_aud", ENABLE_ACLK_AUD, 31, 0, 0),
+};
+
+static struct samsung_cmu_info aud_cmu_info __initdata = {
+	.mux_clks		= aud_mux_clks,
+	.nr_mux_clks		= ARRAY_SIZE(aud_mux_clks),
+	.div_clks		= aud_div_clks,
+	.nr_div_clks		= ARRAY_SIZE(aud_div_clks),
+	.gate_clks		= aud_gate_clks,
+	.nr_gate_clks		= ARRAY_SIZE(aud_gate_clks),
+	.nr_clk_ids		= AUD_NR_CLK,
+	.clk_regs		= aud_clk_regs,
+	.nr_clk_regs		= ARRAY_SIZE(aud_clk_regs),
+};
+
+static void __init exynos7_clk_aud_init(struct device_node *np)
+{
+	samsung_cmu_register_one(np, &aud_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_aud, "samsung,exynos7-clock-aud",
+		exynos7_clk_aud_init);
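
Most of the new exynos7 leaf clocks (SPI, I2S, PCM, SPDIF) carry CLK_SET_RATE_PARENT on both the gate and the *_user mux, so a rate request made on the leaf in CMU_PERIC1 is pushed back up to the dout_sclk_* divider in CMU_TOP0. A consumer-side sketch of what that enables follows; the connection name "spi_busclk0" is a placeholder, not necessarily what the real SPI driver uses.

#include <linux/clk.h>
#include <linux/err.h>

static int foo_spi_clk_setup(struct device *dev, unsigned long hz)
{
	struct clk *sclk;
	int ret;

	sclk = devm_clk_get(dev, "spi_busclk0");	/* hypothetical clock-names entry */
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	ret = clk_prepare_enable(sclk);
	if (ret)
		return ret;

	/* with CLK_SET_RATE_PARENT along the chain this request ends up
	 * re-programming the divider in CMU_TOP0, not just the local gate */
	return clk_set_rate(sclk, hz);
}
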
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 4bda540..9e1f88c 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -374,19 +374,24 @@
  * Common function which registers plls, muxes, dividers and gates
  * for each CMU. It also adds the CMU register list to the register cache.
  */
-void __init samsung_cmu_register_one(struct device_node *np,
+struct samsung_clk_provider * __init samsung_cmu_register_one(
+			struct device_node *np,
 			struct samsung_cmu_info *cmu)
 {
 	void __iomem *reg_base;
 	struct samsung_clk_provider *ctx;
 
 	reg_base = of_iomap(np, 0);
-	if (!reg_base)
+	if (!reg_base) {
 		panic("%s: failed to map registers\n", __func__);
+		return NULL;
+	}
 
 	ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
-	if (!ctx)
+	if (!ctx) {
 		panic("%s: unable to alllocate ctx\n", __func__);
+		return ctx;
+	}
 
 	if (cmu->pll_clks)
 		samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
@@ -410,4 +415,6 @@
 			cmu->nr_clk_regs);
 
 	samsung_clk_of_add_provider(np, ctx);
+
+	return ctx;
 }
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 8acabe1..e4c7538 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -392,7 +392,8 @@
 			struct samsung_pll_clock *pll_list,
 			unsigned int nr_clk, void __iomem *base);
 
-extern void __init samsung_cmu_register_one(struct device_node *,
+extern struct samsung_clk_provider __init *samsung_cmu_register_one(
+			struct device_node *,
 			struct samsung_cmu_info *);
 
 extern unsigned long _get_rate(const char *clk_name);
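
The reason for changing the return type is visible in the prototype above: a CMU init function can now capture the provider context and keep registering clocks that do not fit into samsung_cmu_info. A minimal hedged sketch of such a caller; foo_cmu_info and foo_fixed_factor_clks are assumed to be defined elsewhere.

static void __init foo_cmu_init(struct device_node *np)
{
	struct samsung_clk_provider *ctx;

	ctx = samsung_cmu_register_one(np, &foo_cmu_info);
	if (!ctx)
		return;

	/* anything the table cannot express can still go through ctx */
	samsung_clk_register_fixed_factor(ctx, foo_fixed_factor_clks,
					ARRAY_SIZE(foo_fixed_factor_clks));
}
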
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile
index f83980f..0689d7f 100644
--- a/drivers/clk/shmobile/Makefile
+++ b/drivers/clk/shmobile/Makefile
@@ -1,9 +1,11 @@
 obj-$(CONFIG_ARCH_EMEV2)		+= clk-emev2.o
 obj-$(CONFIG_ARCH_R7S72100)		+= clk-rz.o
+obj-$(CONFIG_ARCH_R8A73A4)		+= clk-r8a73a4.o
 obj-$(CONFIG_ARCH_R8A7740)		+= clk-r8a7740.o
 obj-$(CONFIG_ARCH_R8A7779)		+= clk-r8a7779.o
 obj-$(CONFIG_ARCH_R8A7790)		+= clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_R8A7791)		+= clk-rcar-gen2.o
+obj-$(CONFIG_ARCH_R8A7793)		+= clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_R8A7794)		+= clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_SH73A0)		+= clk-sh73a0.o
 obj-$(CONFIG_ARCH_SHMOBILE_MULTI)	+= clk-div6.o
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index 639241e..036a692 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -54,12 +54,19 @@
 static void cpg_div6_clock_disable(struct clk_hw *hw)
 {
 	struct div6_clock *clock = to_div6_clock(hw);
+	u32 val;
 
-	/* DIV6 clocks require the divisor field to be non-zero when stopping
-	 * the clock.
+	val = clk_readl(clock->reg);
+	val |= CPG_DIV6_CKSTP;
+	/*
+	 * DIV6 clocks require the divisor field to be non-zero when stopping
+	 * the clock. However, some clocks (e.g. ZB on sh73a0) fail to be
+	 * re-enabled later if the divisor field is changed when stopping the
+	 * clock.
 	 */
-	clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK,
-		   clock->reg);
+	if (!(val & CPG_DIV6_DIV_MASK))
+		val |= CPG_DIV6_DIV_MASK;
+	clk_writel(val, clock->reg);
 }
 
 static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
@@ -83,6 +90,9 @@
 {
 	unsigned int div;
 
+	if (!rate)
+		rate = 1;
+
 	div = DIV_ROUND_CLOSEST(parent_rate, rate);
 	return clamp_t(unsigned int, div, 1, 64);
 }
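
The clk-div6.c change is easiest to see on concrete values. Below is a standalone sketch of the before/after register write, assuming CPG_DIV6_CKSTP is bit 8 and the divisor field mask is 0x3f as in the driver (check the defines in clk-div6.c for the authoritative values).

#include <stdio.h>

#define DIV6_CKSTP	(1u << 8)	/* assumed position of the stop bit */
#define DIV6_DIV_MASK	0x3fu		/* assumed 6-bit divisor field */

int main(void)
{
	unsigned int val = 0x04;	/* divisor field currently programmed to 4 */

	unsigned int old_write = val | DIV6_CKSTP | DIV6_DIV_MASK;	/* 0x13f: divisor clobbered */
	unsigned int new_write = val | DIV6_CKSTP;			/* 0x104: divisor preserved */

	printf("old write: %#x, new write: %#x\n", old_write, new_write);
	return 0;
}

Clocks such as ZB on sh73a0 only come back up correctly in the second case, which is exactly what the new conditional (force a non-zero divisor only when the field is already zero) guarantees.
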
diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/shmobile/clk-r8a73a4.c
new file mode 100644
index 0000000..29b9a0b0
--- /dev/null
+++ b/drivers/clk/shmobile/clk-r8a73a4.c
@@ -0,0 +1,241 @@
+/*
+ * r8a73a4 Core CPG Clocks
+ *
+ * Copyright (C) 2014  Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/shmobile.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+struct r8a73a4_cpg {
+	struct clk_onecell_data data;
+	spinlock_t lock;
+	void __iomem *reg;
+};
+
+#define CPG_CKSCR	0xc0
+#define CPG_FRQCRA	0x00
+#define CPG_FRQCRB	0x04
+#define CPG_FRQCRC	0xe0
+#define CPG_PLL0CR	0xd8
+#define CPG_PLL1CR	0x28
+#define CPG_PLL2CR	0x2c
+#define CPG_PLL2HCR	0xe4
+#define CPG_PLL2SCR	0xf4
+
+#define CLK_ENABLE_ON_INIT BIT(0)
+
+struct div4_clk {
+	const char *name;
+	unsigned int reg;
+	unsigned int shift;
+};
+
+static struct div4_clk div4_clks[] = {
+	{ "i",	CPG_FRQCRA, 20 },
+	{ "m3", CPG_FRQCRA, 12 },
+	{ "b",	CPG_FRQCRA,  8 },
+	{ "m1", CPG_FRQCRA,  4 },
+	{ "m2", CPG_FRQCRA,  0 },
+	{ "zx", CPG_FRQCRB, 12 },
+	{ "zs", CPG_FRQCRB,  8 },
+	{ "hp", CPG_FRQCRB,  4 },
+	{ NULL, 0, 0 },
+};
+
+static const struct clk_div_table div4_div_table[] = {
+	{ 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 }, { 5, 12 },
+	{ 6, 16 }, { 7, 18 }, { 8, 24 }, { 10, 36 }, { 11, 48 },
+	{ 12, 10 }, { 0, 0 }
+};
+
+static struct clk * __init
+r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
+			     const char *name)
+{
+	const struct clk_div_table *table = NULL;
+	const char *parent_name;
+	unsigned int shift, reg;
+	unsigned int mult = 1;
+	unsigned int div = 1;
+
+
+	if (!strcmp(name, "main")) {
+		u32 ckscr = clk_readl(cpg->reg + CPG_CKSCR);
+
+		switch ((ckscr >> 28) & 3) {
+		case 0:	/* extal1 */
+			parent_name = of_clk_get_parent_name(np, 0);
+			break;
+		case 1:	/* extal1 / 2 */
+			parent_name = of_clk_get_parent_name(np, 0);
+			div = 2;
+			break;
+		case 2: /* extal2 */
+			parent_name = of_clk_get_parent_name(np, 1);
+			break;
+		case 3: /* extal2 / 2 */
+			parent_name = of_clk_get_parent_name(np, 1);
+			div = 2;
+			break;
+		}
+	} else if (!strcmp(name, "pll0")) {
+		/* PLL0/1 are configurable multiplier clocks. Register them as
+		 * fixed factor clocks for now as there's no generic multiplier
+		 * clock implementation and we currently have no need to change
+		 * the multiplier value.
+		 */
+		u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+
+		parent_name = "main";
+		mult = ((value >> 24) & 0x7f) + 1;
+		if (value & BIT(20))
+			div = 2;
+	} else if (!strcmp(name, "pll1")) {
+		u32 value = clk_readl(cpg->reg + CPG_PLL1CR);
+
+		parent_name = "main";
+		/* XXX: enable bit? */
+		mult = ((value >> 24) & 0x7f) + 1;
+		if (value & BIT(7))
+			div = 2;
+	} else if (!strncmp(name, "pll2", 4)) {
+		u32 value, cr;
+
+		switch (name[4]) {
+		case 0:
+			cr = CPG_PLL2CR;
+			break;
+		case 's':
+			cr = CPG_PLL2SCR;
+			break;
+		case 'h':
+			cr = CPG_PLL2HCR;
+			break;
+		default:
+			return ERR_PTR(-EINVAL);
+		}
+		value = clk_readl(cpg->reg + cr);
+		switch ((value >> 5) & 7) {
+		case 0:
+			parent_name = "main";
+			div = 2;
+			break;
+		case 1:
+			parent_name = "extal2";
+			div = 2;
+			break;
+		case 3:
+			parent_name = "extal2";
+			div = 4;
+			break;
+		case 4:
+			parent_name = "main";
+			break;
+		case 5:
+			parent_name = "extal2";
+			break;
+		default:
+			pr_warn("%s: unexpected parent of %s\n", __func__,
+				name);
+			return ERR_PTR(-EINVAL);
+		}
+		/* XXX: enable bit? */
+		mult = ((value >> 24) & 0x7f) + 1;
+	} else if (!strcmp(name, "z") || !strcmp(name, "z2")) {
+		u32 shift = 8;
+
+		parent_name = "pll0";
+		if (name[1] == '2') {
+			div = 2;
+			shift = 0;
+		}
+		div *= 32;
+		mult = 0x20 - ((clk_readl(cpg->reg + CPG_FRQCRC) >> shift)
+		       & 0x1f);
+	} else {
+		struct div4_clk *c;
+
+		for (c = div4_clks; c->name; c++) {
+			if (!strcmp(name, c->name))
+				break;
+		}
+		if (!c->name)
+			return ERR_PTR(-EINVAL);
+
+		parent_name = "pll1";
+		table = div4_div_table;
+		reg = c->reg;
+		shift = c->shift;
+	}
+
+	if (!table) {
+		return clk_register_fixed_factor(NULL, name, parent_name, 0,
+						 mult, div);
+	} else {
+		return clk_register_divider_table(NULL, name, parent_name, 0,
+						  cpg->reg + reg, shift, 4, 0,
+						  table, &cpg->lock);
+	}
+}
+
+static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
+{
+	struct r8a73a4_cpg *cpg;
+	struct clk **clks;
+	unsigned int i;
+	int num_clks;
+
+	num_clks = of_property_count_strings(np, "clock-output-names");
+	if (num_clks < 0) {
+		pr_err("%s: failed to count clocks\n", __func__);
+		return;
+	}
+
+	cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+	clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
+	if (cpg == NULL || clks == NULL) {
+		/* We're leaking memory on purpose, there's no point in cleaning
+		 * up as the system won't boot anyway.
+		 */
+		return;
+	}
+
+	spin_lock_init(&cpg->lock);
+
+	cpg->data.clks = clks;
+	cpg->data.clk_num = num_clks;
+
+	cpg->reg = of_iomap(np, 0);
+	if (WARN_ON(cpg->reg == NULL))
+		return;
+
+	for (i = 0; i < num_clks; ++i) {
+		const char *name;
+		struct clk *clk;
+
+		of_property_read_string_index(np, "clock-output-names", i,
+					      &name);
+
+		clk = r8a73a4_cpg_register_clock(np, cpg, name);
+		if (IS_ERR(clk))
+			pr_err("%s: failed to register %s %s clock (%ld)\n",
+			       __func__, np->name, name, PTR_ERR(clk));
+		else
+			cpg->data.clks[i] = clk;
+	}
+
+	of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+}
+CLK_OF_DECLARE(r8a73a4_cpg_clks, "renesas,r8a73a4-cpg-clocks",
+	       r8a73a4_cpg_clocks_init);
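
The new r8a73a4 driver hands all of its clocks out through of_clk_src_onecell_get, indexed by their position in the clock-output-names property. A hedged consumer sketch follows; index 0 is used purely as an example and foo_report_cpg_rate is not a real function.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/of.h>

static int foo_report_cpg_rate(struct device_node *cpg_np)
{
	struct clk *clk;

	clk = of_clk_get(cpg_np, 0);	/* clock listed first in clock-output-names */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pr_info("cpg clock 0 runs at %lu Hz\n", clk_get_rate(clk));
	clk_put(clk);
	return 0;
}
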
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index e996425..acfb6d7 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -33,6 +33,8 @@
 #define CPG_FRQCRC			0x000000e0
 #define CPG_FRQCRC_ZFC_MASK		(0x1f << 8)
 #define CPG_FRQCRC_ZFC_SHIFT		8
+#define CPG_ADSPCKCR			0x0000025c
+#define CPG_RCANCKCR			0x00000270
 
 /* -----------------------------------------------------------------------------
  * Z Clock
@@ -161,6 +163,88 @@
 	return clk;
 }
 
+static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg,
+						 struct device_node *np)
+{
+	const char *parent_name = of_clk_get_parent_name(np, 1);
+	struct clk_fixed_factor *fixed;
+	struct clk_gate *gate;
+	struct clk *clk;
+
+	fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+	if (!fixed)
+		return ERR_PTR(-ENOMEM);
+
+	fixed->mult = 1;
+	fixed->div = 6;
+
+	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+	if (!gate) {
+		kfree(fixed);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	gate->reg = cpg->reg + CPG_RCANCKCR;
+	gate->bit_idx = 8;
+	gate->flags = CLK_GATE_SET_TO_DISABLE;
+	gate->lock = &cpg->lock;
+
+	clk = clk_register_composite(NULL, "rcan", &parent_name, 1, NULL, NULL,
+				     &fixed->hw, &clk_fixed_factor_ops,
+				     &gate->hw, &clk_gate_ops, 0);
+	if (IS_ERR(clk)) {
+		kfree(gate);
+		kfree(fixed);
+	}
+
+	return clk;
+}
+
+/* ADSP divisors */
+static const struct clk_div_table cpg_adsp_div_table[] = {
+	{  1,  3 }, {  2,  4 }, {  3,  6 }, {  4,  8 },
+	{  5, 12 }, {  6, 16 }, {  7, 18 }, {  8, 24 },
+	{ 10, 36 }, { 11, 48 }, {  0,  0 },
+};
+
+static struct clk * __init cpg_adsp_clk_register(struct rcar_gen2_cpg *cpg)
+{
+	const char *parent_name = "pll1";
+	struct clk_divider *div;
+	struct clk_gate *gate;
+	struct clk *clk;
+
+	div = kzalloc(sizeof(*div), GFP_KERNEL);
+	if (!div)
+		return ERR_PTR(-ENOMEM);
+
+	div->reg = cpg->reg + CPG_ADSPCKCR;
+	div->width = 4;
+	div->table = cpg_adsp_div_table;
+	div->lock = &cpg->lock;
+
+	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+	if (!gate) {
+		kfree(div);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	gate->reg = cpg->reg + CPG_ADSPCKCR;
+	gate->bit_idx = 8;
+	gate->flags = CLK_GATE_SET_TO_DISABLE;
+	gate->lock = &cpg->lock;
+
+	clk = clk_register_composite(NULL, "adsp", &parent_name, 1, NULL, NULL,
+				     &div->hw, &clk_divider_ops,
+				     &gate->hw, &clk_gate_ops, 0);
+	if (IS_ERR(clk)) {
+		kfree(gate);
+		kfree(div);
+	}
+
+	return clk;
+}
+
 /* -----------------------------------------------------------------------------
  * CPG Clock Data
  */
@@ -263,6 +347,10 @@
 		shift = 0;
 	} else if (!strcmp(name, "z")) {
 		return cpg_z_clk_register(cpg);
+	} else if (!strcmp(name, "rcan")) {
+		return cpg_rcan_clk_register(cpg, np);
+	} else if (!strcmp(name, "adsp")) {
+		return cpg_adsp_clk_register(cpg);
 	} else {
 		return ERR_PTR(-EINVAL);
 	}
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 2282cef..bf12a25 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -37,8 +37,8 @@
 	struct clk_hw *pgate_hw = &flexgen->pgate.hw;
 	struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
-	pgate_hw->clk = hw->clk;
-	fgate_hw->clk = hw->clk;
+	__clk_hw_set_clk(pgate_hw, hw);
+	__clk_hw_set_clk(fgate_hw, hw);
 
 	clk_gate_ops.enable(pgate_hw);
 
@@ -54,7 +54,7 @@
 	struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
 	/* disable only the final gate */
-	fgate_hw->clk = hw->clk;
+	__clk_hw_set_clk(fgate_hw, hw);
 
 	clk_gate_ops.disable(fgate_hw);
 
@@ -66,7 +66,7 @@
 	struct flexgen *flexgen = to_flexgen(hw);
 	struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
-	fgate_hw->clk = hw->clk;
+	__clk_hw_set_clk(fgate_hw, hw);
 
 	if (!clk_gate_ops.is_enabled(fgate_hw))
 		return 0;
@@ -79,7 +79,7 @@
 	struct flexgen *flexgen = to_flexgen(hw);
 	struct clk_hw *mux_hw = &flexgen->mux.hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	return clk_mux_ops.get_parent(mux_hw);
 }
@@ -89,7 +89,7 @@
 	struct flexgen *flexgen = to_flexgen(hw);
 	struct clk_hw *mux_hw = &flexgen->mux.hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	return clk_mux_ops.set_parent(mux_hw, index);
 }
@@ -124,8 +124,8 @@
 	struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
 	unsigned long mid_rate;
 
-	pdiv_hw->clk = hw->clk;
-	fdiv_hw->clk = hw->clk;
+	__clk_hw_set_clk(pdiv_hw, hw);
+	__clk_hw_set_clk(fdiv_hw, hw);
 
 	mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate);
 
@@ -138,16 +138,27 @@
 	struct flexgen *flexgen = to_flexgen(hw);
 	struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
 	struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
-	unsigned long primary_div = 0;
+	unsigned long div = 0;
 	int ret = 0;
 
-	pdiv_hw->clk = hw->clk;
-	fdiv_hw->clk = hw->clk;
+	__clk_hw_set_clk(pdiv_hw, hw);
+	__clk_hw_set_clk(fdiv_hw, hw);
 
-	primary_div = clk_best_div(parent_rate, rate);
+	div = clk_best_div(parent_rate, rate);
 
-	clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
-	ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * primary_div);
+	/*
+	 * pdiv is mainly targeted for low freq results, while fdiv
+	 * should be used for div <= 64. The other way round can
+	 * lead to 'duty cycle' issues.
+	 */
+
+	if (div <= 64) {
+		clk_divider_ops.set_rate(pdiv_hw, parent_rate, parent_rate);
+		ret = clk_divider_ops.set_rate(fdiv_hw, rate, rate * div);
+	} else {
+		clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
+		ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * div);
+	}
 
 	return ret;
 }
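
Every st/ change in this pull, in clk-flexgen.c above and in clkgen-mux.c below, replaces the open-coded "sub_hw->clk = hw->clk" copy with __clk_hw_set_clk(). The helper arrived with the clk/clk_core split, where the per-user struct clk no longer identifies the underlying clock on its own; the sketch below shows the likely shape of the helper, but include/linux/clk-provider.h is the authoritative definition.

/* Hedged reconstruction of the helper from include/linux/clk-provider.h. */
static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
{
	dst->clk = src->clk;	/* per-user handle, as before */
	dst->core = src->core;	/* shared clk_core backing the hw */
}

Copying only ->clk would leave ->core unset on the composite sub-hw structures, which is presumably why the plain assignments were replaced.
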
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 79dc40b..9a15ec3 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -94,7 +94,7 @@
 	unsigned long timeout;
 	int ret = 0;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
 	if (ret)
@@ -116,7 +116,7 @@
 	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
 	struct clk_hw *mux_hw = &genamux->mux.hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
 }
@@ -126,7 +126,7 @@
 	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
 	struct clk_hw *mux_hw = &genamux->mux.hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
 }
@@ -136,7 +136,7 @@
 	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
 	struct clk_hw *mux_hw = &genamux->mux.hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
 	if ((s8)genamux->muxsel < 0) {
@@ -174,7 +174,7 @@
 	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
 	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-	div_hw->clk = hw->clk;
+	__clk_hw_set_clk(div_hw, hw);
 
 	return clk_divider_ops.recalc_rate(div_hw, parent_rate);
 }
@@ -185,7 +185,7 @@
 	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
 	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-	div_hw->clk = hw->clk;
+	__clk_hw_set_clk(div_hw, hw);
 
 	return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
 }
@@ -196,7 +196,7 @@
 	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
 	struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-	div_hw->clk = hw->clk;
+	__clk_hw_set_clk(div_hw, hw);
 
 	return clk_divider_ops.round_rate(div_hw, rate, prate);
 }
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index a66953c..3a5292e 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -8,6 +8,7 @@
 obj-y += clk-mod0.o
 obj-y += clk-sun8i-mbus.o
 obj-y += clk-sun9i-core.o
+obj-y += clk-sun9i-mmc.o
 
 obj-$(CONFIG_MFD_SUN6I_PRCM) += \
 	clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index 62e08fb..8c20190 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -80,6 +80,8 @@
 }
 
 static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
+				       unsigned long min_rate,
+				       unsigned long max_rate,
 				       unsigned long *best_parent_rate,
 				       struct clk_hw **best_parent_p)
 {
@@ -156,9 +158,10 @@
 	.set_rate = clk_factors_set_rate,
 };
 
-struct clk * __init sunxi_factors_register(struct device_node *node,
-					   const struct factors_data *data,
-					   spinlock_t *lock)
+struct clk *sunxi_factors_register(struct device_node *node,
+				   const struct factors_data *data,
+				   spinlock_t *lock,
+				   void __iomem *reg)
 {
 	struct clk *clk;
 	struct clk_factors *factors;
@@ -168,11 +171,8 @@
 	struct clk_hw *mux_hw = NULL;
 	const char *clk_name = node->name;
 	const char *parents[FACTORS_MAX_PARENTS];
-	void __iomem *reg;
 	int i = 0;
 
-	reg = of_iomap(node, 0);
-
 	/* if we have a mux, we will have >1 parents */
 	while (i < FACTORS_MAX_PARENTS &&
 	       (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index 912238f..171085a 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -36,8 +36,9 @@
 	spinlock_t *lock;
 };
 
-struct clk * __init sunxi_factors_register(struct device_node *node,
-					   const struct factors_data *data,
-					   spinlock_t *lock);
+struct clk *sunxi_factors_register(struct device_node *node,
+				   const struct factors_data *data,
+				   spinlock_t *lock,
+				   void __iomem *reg);
 
 #endif
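
With the of_iomap() call moved out of sunxi_factors_register(), each caller now decides how it obtains and error-checks the registers (of_iomap, of_io_request_and_map, or devm_ioremap_resource in the new platform-driver path shown below). A minimal hedged caller sketch against the new signature; the foo_* names and the compatible string are invented.

static DEFINE_SPINLOCK(foo_factors_lock);

static void __init foo_factors_setup(struct device_node *node)
{
	void __iomem *reg;

	reg = of_iomap(node, 0);
	if (!reg) {
		pr_err("%s: could not map %s registers\n", __func__,
		       node->name);
		return;
	}

	/* foo_factors_data would be a struct factors_data describing the clock */
	sunxi_factors_register(node, &foo_factors_data,
			       &foo_factors_lock, reg);
}
CLK_OF_DECLARE(foo_factors, "vendor,foo-factors-clk", foo_factors_setup);
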
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index da0524ea..ec8f5a1 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -17,6 +17,7 @@
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of_address.h>
+#include <linux/platform_device.h>
 
 #include "clk-factors.h"
 
@@ -67,7 +68,7 @@
 	.pwidth = 2,
 };
 
-static const struct factors_data sun4i_a10_mod0_data __initconst = {
+static const struct factors_data sun4i_a10_mod0_data = {
 	.enable = 31,
 	.mux = 24,
 	.muxmask = BIT(1) | BIT(0),
@@ -79,15 +80,95 @@
 
 static void __init sun4i_a10_mod0_setup(struct device_node *node)
 {
-	sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun4i_a10_mod0_lock);
+	void __iomem *reg;
+
+	reg = of_iomap(node, 0);
+	if (!reg) {
+		/*
+		 * This happens with mod0 clk nodes instantiated through
+		 * mfd, as those do not have their resources assigned at
+		 * CLK_OF_DECLARE time yet, so do not print an error.
+		 */
+		return;
+	}
+
+	sunxi_factors_register(node, &sun4i_a10_mod0_data,
+			       &sun4i_a10_mod0_lock, reg);
 }
 CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup);
 
+static int sun4i_a10_mod0_clk_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *r;
+	void __iomem *reg;
+
+	if (!np)
+		return -ENODEV;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	reg = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(reg))
+		return PTR_ERR(reg);
+
+	sunxi_factors_register(np, &sun4i_a10_mod0_data,
+			       &sun4i_a10_mod0_lock, reg);
+	return 0;
+}
+
+static const struct of_device_id sun4i_a10_mod0_clk_dt_ids[] = {
+	{ .compatible = "allwinner,sun4i-a10-mod0-clk" },
+	{ /* sentinel */ }
+};
+
+static struct platform_driver sun4i_a10_mod0_clk_driver = {
+	.driver = {
+		.name = "sun4i-a10-mod0-clk",
+		.of_match_table = sun4i_a10_mod0_clk_dt_ids,
+	},
+	.probe = sun4i_a10_mod0_clk_probe,
+};
+module_platform_driver(sun4i_a10_mod0_clk_driver);
+
+static const struct factors_data sun9i_a80_mod0_data __initconst = {
+	.enable = 31,
+	.mux = 24,
+	.muxmask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+	.table = &sun4i_a10_mod0_config,
+	.getter = sun4i_a10_get_mod0_factors,
+};
+
+static void __init sun9i_a80_mod0_setup(struct device_node *node)
+{
+	void __iomem *reg;
+
+	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+	if (IS_ERR(reg)) {
+		pr_err("Could not get registers for mod0-clk: %s\n",
+		       node->name);
+		return;
+	}
+
+	sunxi_factors_register(node, &sun9i_a80_mod0_data,
+			       &sun4i_a10_mod0_lock, reg);
+}
+CLK_OF_DECLARE(sun9i_a80_mod0, "allwinner,sun9i-a80-mod0-clk", sun9i_a80_mod0_setup);
+
 static DEFINE_SPINLOCK(sun5i_a13_mbus_lock);
 
 static void __init sun5i_a13_mbus_setup(struct device_node *node)
 {
-	struct clk *mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun5i_a13_mbus_lock);
+	struct clk *mbus;
+	void __iomem *reg;
+
+	reg = of_iomap(node, 0);
+	if (!reg) {
+		pr_err("Could not get registers for a13-mbus-clk\n");
+		return;
+	}
+
+	mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data,
+				      &sun5i_a13_mbus_lock, reg);
 
 	/* The MBUS clocks needs to be always enabled */
 	__clk_get(mbus);
@@ -95,14 +176,10 @@
 }
 CLK_OF_DECLARE(sun5i_a13_mbus, "allwinner,sun5i-a13-mbus-clk", sun5i_a13_mbus_setup);
 
-struct mmc_phase_data {
-	u8	offset;
-};
-
 struct mmc_phase {
 	struct clk_hw		hw;
+	u8			offset;
 	void __iomem		*reg;
-	struct mmc_phase_data	*data;
 	spinlock_t		*lock;
 };
 
@@ -118,7 +195,7 @@
 	u8 delay;
 
 	value = readl(phase->reg);
-	delay = (value >> phase->data->offset) & 0x3;
+	delay = (value >> phase->offset) & 0x3;
 
 	if (!delay)
 		return 180;
@@ -206,8 +283,8 @@
 
 	spin_lock_irqsave(phase->lock, flags);
 	value = readl(phase->reg);
-	value &= ~GENMASK(phase->data->offset + 3, phase->data->offset);
-	value |= delay << phase->data->offset;
+	value &= ~GENMASK(phase->offset + 3, phase->offset);
+	value |= delay << phase->offset;
 	writel(value, phase->reg);
 	spin_unlock_irqrestore(phase->lock, flags);
 
@@ -219,66 +296,97 @@
 	.set_phase	= mmc_set_phase,
 };
 
-static void __init sun4i_a10_mmc_phase_setup(struct device_node *node,
-					     struct mmc_phase_data *data)
+/*
+ * sunxi_mmc_setup - Common setup function for mmc module clocks
+ *
+ * The only difference between module clocks on different platforms is the
+ * width of the mux register bits and the valid values, which are passed in
+ * through struct factors_data. The phase clock parts are identical.
+ */
+static void __init sunxi_mmc_setup(struct device_node *node,
+				   const struct factors_data *data,
+				   spinlock_t *lock)
 {
-	const char *parent_names[1] = { of_clk_get_parent_name(node, 0) };
-	struct clk_init_data init = {
-		.num_parents	= 1,
-		.parent_names	= parent_names,
-		.ops		= &mmc_clk_ops,
-	};
+	struct clk_onecell_data *clk_data;
+	const char *parent;
+	void __iomem *reg;
+	int i;
 
-	struct mmc_phase *phase;
-	struct clk *clk;
+	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+	if (IS_ERR(reg)) {
+		pr_err("Couldn't map the %s clock registers\n", node->name);
+		return;
+	}
 
-	phase = kmalloc(sizeof(*phase), GFP_KERNEL);
-	if (!phase)
+	clk_data = kmalloc(sizeof(*clk_data), GFP_KERNEL);
+	if (!clk_data)
 		return;
 
-	phase->hw.init = &init;
+	clk_data->clks = kcalloc(3, sizeof(*clk_data->clks), GFP_KERNEL);
+	if (!clk_data->clks)
+		goto err_free_data;
 
-	phase->reg = of_iomap(node, 0);
-	if (!phase->reg)
-		goto err_free;
+	clk_data->clk_num = 3;
+	clk_data->clks[0] = sunxi_factors_register(node, data, lock, reg);
+	if (!clk_data->clks[0])
+		goto err_free_clks;
 
-	phase->data = data;
-	phase->lock = &sun4i_a10_mod0_lock;
+	parent = __clk_get_name(clk_data->clks[0]);
 
-	if (of_property_read_string(node, "clock-output-names", &init.name))
-		init.name = node->name;
+	for (i = 1; i < 3; i++) {
+		struct clk_init_data init = {
+			.num_parents	= 1,
+			.parent_names	= &parent,
+			.ops		= &mmc_clk_ops,
+		};
+		struct mmc_phase *phase;
 
-	clk = clk_register(NULL, &phase->hw);
-	if (IS_ERR(clk))
-		goto err_unmap;
+		phase = kmalloc(sizeof(*phase), GFP_KERNEL);
+		if (!phase)
+			continue;
 
-	of_clk_add_provider(node, of_clk_src_simple_get, clk);
+		phase->hw.init = &init;
+		phase->reg = reg;
+		phase->lock = lock;
+
+		if (i == 1)
+			phase->offset = 8;
+		else
+			phase->offset = 20;
+
+		if (of_property_read_string_index(node, "clock-output-names",
+						  i, &init.name))
+			init.name = node->name;
+
+		clk_data->clks[i] = clk_register(NULL, &phase->hw);
+		if (IS_ERR(clk_data->clks[i])) {
+			kfree(phase);
+			continue;
+		}
+	}
+
+	of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 
 	return;
 
-err_unmap:
-	iounmap(phase->reg);
-err_free:
-	kfree(phase);
+err_free_clks:
+	kfree(clk_data->clks);
+err_free_data:
+	kfree(clk_data);
 }
 
+static DEFINE_SPINLOCK(sun4i_a10_mmc_lock);
 
-static struct mmc_phase_data mmc_output_clk = {
-	.offset	= 8,
-};
-
-static struct mmc_phase_data mmc_sample_clk = {
-	.offset	= 20,
-};
-
-static void __init sun4i_a10_mmc_output_setup(struct device_node *node)
+static void __init sun4i_a10_mmc_setup(struct device_node *node)
 {
-	sun4i_a10_mmc_phase_setup(node, &mmc_output_clk);
+	sunxi_mmc_setup(node, &sun4i_a10_mod0_data, &sun4i_a10_mmc_lock);
 }
-CLK_OF_DECLARE(sun4i_a10_mmc_output, "allwinner,sun4i-a10-mmc-output-clk", sun4i_a10_mmc_output_setup);
+CLK_OF_DECLARE(sun4i_a10_mmc, "allwinner,sun4i-a10-mmc-clk", sun4i_a10_mmc_setup);
 
-static void __init sun4i_a10_mmc_sample_setup(struct device_node *node)
+static DEFINE_SPINLOCK(sun9i_a80_mmc_lock);
+
+static void __init sun9i_a80_mmc_setup(struct device_node *node)
 {
-	sun4i_a10_mmc_phase_setup(node, &mmc_sample_clk);
+	sunxi_mmc_setup(node, &sun9i_a80_mod0_data, &sun9i_a80_mmc_lock);
 }
-CLK_OF_DECLARE(sun4i_a10_mmc_sample, "allwinner,sun4i-a10-mmc-sample-clk", sun4i_a10_mmc_sample_setup);
+CLK_OF_DECLARE(sun9i_a80_mmc, "allwinner,sun9i-a80-mmc-clk", sun9i_a80_mmc_setup);
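
With sunxi_mmc_setup() in place, each MMC module clock node now exposes three clocks through of_clk_src_onecell_get(): the module clock itself plus the output and sample phase clocks driven by mmc_clk_ops. Consumers are therefore expected to go through the generic clock API. A minimal consumer sketch follows; the "sample" con-id and the 120-degree value are assumptions for illustration, not taken from this patch:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_setup_sample_clk(struct device *dev)
    {
        struct clk *sample;
        int ret;

        /* con-id is an assumption; it depends on the consumer's clock-names */
        sample = devm_clk_get(dev, "sample");
        if (IS_ERR(sample))
            return PTR_ERR(sample);

        ret = clk_prepare_enable(sample);
        if (ret)
            return ret;

        /* ends up in the provider's mmc_set_phase() shown above */
        return clk_set_phase(sample, 120);
    }
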
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 3d282fb..63cf149 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -45,6 +45,8 @@
 }
 
 static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
+				 unsigned long min_rate,
+				 unsigned long max_rate,
 				 unsigned long *best_parent_rate,
 				 struct clk_hw **best_parent_clk)
 {
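
The two new parameters reflect the clk core's rate-constraint support added this cycle: .determine_rate callbacks now receive the permitted minimum and maximum rate along with the request. A minimal sketch of how an implementation might honour them, using a hypothetical foo clock and showing only the clamping step:

    #include <linux/clk-provider.h>
    #include <linux/kernel.h>

    static long foo_determine_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long min_rate,
                                   unsigned long max_rate,
                                   unsigned long *best_parent_rate,
                                   struct clk_hw **best_parent_clk)
    {
        /* keep the request inside the constraint window before rounding */
        rate = clamp(rate, min_rate, max_rate);

        /* ...then choose parent and divider exactly as before... */
        return rate;
    }
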
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index ef49786..14cd026 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -69,8 +69,17 @@
 
 static void __init sun8i_a23_mbus_setup(struct device_node *node)
 {
-	struct clk *mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
-						  &sun8i_a23_mbus_lock);
+	struct clk *mbus;
+	void __iomem *reg;
+
+	reg = of_iomap(node, 0);
+	if (!reg) {
+		pr_err("Could not get registers for a23-mbus-clk\n");
+		return;
+	}
+
+	mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
+				      &sun8i_a23_mbus_lock, reg);
 
 	/* The MBUS clocks needs to be always enabled */
 	__clk_get(mbus);
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
index 3cb9036..d8da77d 100644
--- a/drivers/clk/sunxi/clk-sun9i-core.c
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -24,50 +24,51 @@
 
 
 /**
- * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL1
+ * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL4
  * PLL4 rate is calculated as follows
  * rate = (parent_rate * n >> p) / (m + 1);
- * parent_rate is always 24Mhz
+ * parent_rate is always 24MHz
  *
  * p and m are named div1 and div2 in Allwinner's SDK
  */
 
 static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
-				       u8 *n, u8 *k, u8 *m, u8 *p)
+				       u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret)
 {
-	int div;
+	int n;
+	int m = 1;
+	int p = 1;
 
-	/* Normalize value to a 6M multiple */
-	div = DIV_ROUND_UP(*freq, 6000000);
+	/* Normalize value to a 6 MHz multiple (24 MHz / 4) */
+	n = DIV_ROUND_UP(*freq, 6000000);
 
-	/* divs above 256 cannot be odd */
-	if (div > 256)
-		div = round_up(div, 2);
+	/* If n is too large switch to steps of 12 MHz */
+	if (n > 255) {
+		m = 0;
+		n = (n + 1) / 2;
+	}
 
-	/* divs above 512 must be a multiple of 4 */
-	if (div > 512)
-		div = round_up(div, 4);
+	/* If n is still too large switch to steps of 24 MHz */
+	if (n > 255) {
+		p = 0;
+		n = (n + 1) / 2;
+	}
 
-	*freq = 6000000 * div;
+	/* n must be between 12 and 255 */
+	if (n > 255)
+		n = 255;
+	else if (n < 12)
+		n = 12;
+
+	*freq = ((24000000 * n) >> p) / (m + 1);
 
 	/* we were called to round the frequency, we can now return */
-	if (n == NULL)
+	if (n_ret == NULL)
 		return;
 
-	/* p will be 1 for divs under 512 */
-	if (div < 512)
-		*p = 1;
-	else
-		*p = 0;
-
-	/* m will be 1 if div is odd */
-	if (div & 1)
-		*m = 1;
-	else
-		*m = 0;
-
-	/* calculate a suitable n based on m and p */
-	*n = div / (*p + 1) / (*m + 1);
+	*n_ret = n;
+	*m_ret = m;
+	*p_ret = p;
 }
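
A worked example of the rewritten factor selection, using only the arithmetic above: a request for 2.4 GHz gives

    n = DIV_ROUND_UP(2400000000, 6000000) = 400     /* > 255 */
    m = 0, n = (400 + 1) / 2 = 200                  /* switch to 12 MHz steps */
    p = 1 (unchanged), and 12 <= 200 <= 255
    *freq = ((24000000 * 200) >> 1) / (0 + 1) = 2400000000 Hz

so the request is met exactly once the divider steps widen to 12 MHz.
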
 
 static struct clk_factors_config sun9i_a80_pll4_config = {
@@ -89,7 +90,17 @@
 
 static void __init sun9i_a80_pll4_setup(struct device_node *node)
 {
-	sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock);
+	void __iomem *reg;
+
+	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+	if (IS_ERR(reg)) {
+		pr_err("Could not get registers for a80-pll4-clk: %s\n",
+		       node->name);
+		return;
+	}
+
+	sunxi_factors_register(node, &sun9i_a80_pll4_data,
+			       &sun9i_a80_pll4_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup);
 
@@ -139,8 +150,18 @@
 
 static void __init sun9i_a80_gt_setup(struct device_node *node)
 {
-	struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
-						&sun9i_a80_gt_lock);
+	void __iomem *reg;
+	struct clk *gt;
+
+	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+	if (IS_ERR(reg)) {
+		pr_err("Could not get registers for a80-gt-clk: %s\n",
+		       node->name);
+		return;
+	}
+
+	gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
+				    &sun9i_a80_gt_lock, reg);
 
 	/* The GT bus clock needs to be always enabled */
 	__clk_get(gt);
@@ -194,7 +215,17 @@
 
 static void __init sun9i_a80_ahb_setup(struct device_node *node)
 {
-	sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock);
+	void __iomem *reg;
+
+	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+	if (IS_ERR(reg)) {
+		pr_err("Could not get registers for a80-ahb-clk: %s\n",
+		       node->name);
+		return;
+	}
+
+	sunxi_factors_register(node, &sun9i_a80_ahb_data,
+			       &sun9i_a80_ahb_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup);
 
@@ -210,7 +241,17 @@
 
 static void __init sun9i_a80_apb0_setup(struct device_node *node)
 {
-	sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock);
+	void __iomem *reg;
+
+	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+	if (IS_ERR(reg)) {
+		pr_err("Could not get registers for a80-apb0-clk: %s\n",
+		       node->name);
+		return;
+	}
+
+	sunxi_factors_register(node, &sun9i_a80_apb0_data,
+			       &sun9i_a80_apb0_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup);
 
@@ -266,6 +307,16 @@
 
 static void __init sun9i_a80_apb1_setup(struct device_node *node)
 {
-	sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock);
+	void __iomem *reg;
+
+	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+	if (IS_ERR(reg)) {
+		pr_err("Could not get registers for a80-apb1-clk: %s\n",
+		       node->name);
+		return;
+	}
+
+	sunxi_factors_register(node, &sun9i_a80_apb1_data,
+			       &sun9i_a80_apb1_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup);
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
new file mode 100644
index 0000000..710c273
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2015 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai	<wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#define SUN9I_MMC_WIDTH		4
+
+#define SUN9I_MMC_GATE_BIT	16
+#define SUN9I_MMC_RESET_BIT	18
+
+struct sun9i_mmc_clk_data {
+	spinlock_t			lock;
+	void __iomem			*membase;
+	struct clk			*clk;
+	struct reset_control		*reset;
+	struct clk_onecell_data		clk_data;
+	struct reset_controller_dev	rcdev;
+};
+
+static int sun9i_mmc_reset_assert(struct reset_controller_dev *rcdev,
+			      unsigned long id)
+{
+	struct sun9i_mmc_clk_data *data = container_of(rcdev,
+						       struct sun9i_mmc_clk_data,
+						       rcdev);
+	unsigned long flags;
+	void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
+	u32 val;
+
+	clk_prepare_enable(data->clk);
+	spin_lock_irqsave(&data->lock, flags);
+
+	val = readl(reg);
+	writel(val & ~BIT(SUN9I_MMC_RESET_BIT), reg);
+
+	spin_unlock_irqrestore(&data->lock, flags);
+	clk_disable_unprepare(data->clk);
+
+	return 0;
+}
+
+static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
+				unsigned long id)
+{
+	struct sun9i_mmc_clk_data *data = container_of(rcdev,
+						       struct sun9i_mmc_clk_data,
+						       rcdev);
+	unsigned long flags;
+	void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
+	u32 val;
+
+	clk_prepare_enable(data->clk);
+	spin_lock_irqsave(&data->lock, flags);
+
+	val = readl(reg);
+	writel(val | BIT(SUN9I_MMC_RESET_BIT), reg);
+
+	spin_unlock_irqrestore(&data->lock, flags);
+	clk_disable_unprepare(data->clk);
+
+	return 0;
+}
+
+static struct reset_control_ops sun9i_mmc_reset_ops = {
+	.assert		= sun9i_mmc_reset_assert,
+	.deassert	= sun9i_mmc_reset_deassert,
+};
+
+static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct sun9i_mmc_clk_data *data;
+	struct clk_onecell_data *clk_data;
+	const char *clk_name = np->name;
+	const char *clk_parent;
+	struct resource *r;
+	int count, i, ret;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	spin_lock_init(&data->lock);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	/* one clock/reset pair per word */
+	count = DIV_ROUND_UP((r->end - r->start + 1), SUN9I_MMC_WIDTH);
+	data->membase = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(data->membase))
+		return PTR_ERR(data->membase);
+
+	clk_data = &data->clk_data;
+	clk_data->clk_num = count;
+	clk_data->clks = devm_kcalloc(&pdev->dev, count, sizeof(struct clk *),
+				      GFP_KERNEL);
+	if (!clk_data->clks)
+		return -ENOMEM;
+
+	data->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(data->clk)) {
+		dev_err(&pdev->dev, "Could not get clock\n");
+		return PTR_ERR(data->clk);
+	}
+
+	data->reset = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(data->reset)) {
+		dev_err(&pdev->dev, "Could not get reset control\n");
+		return PTR_ERR(data->reset);
+	}
+
+	ret = reset_control_deassert(data->reset);
+	if (ret) {
+		dev_err(&pdev->dev, "Reset deassert err %d\n", ret);
+		return ret;
+	}
+
+	clk_parent = __clk_get_name(data->clk);
+	for (i = 0; i < count; i++) {
+		of_property_read_string_index(np, "clock-output-names",
+					      i, &clk_name);
+
+		clk_data->clks[i] = clk_register_gate(&pdev->dev, clk_name,
+						      clk_parent, 0,
+						      data->membase + SUN9I_MMC_WIDTH * i,
+						      SUN9I_MMC_GATE_BIT, 0,
+						      &data->lock);
+
+		if (IS_ERR(clk_data->clks[i])) {
+			ret = PTR_ERR(clk_data->clks[i]);
+			goto err_clk_register;
+		}
+	}
+
+	ret = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+	if (ret)
+		goto err_clk_provider;
+
+	data->rcdev.owner = THIS_MODULE;
+	data->rcdev.nr_resets = count;
+	data->rcdev.ops = &sun9i_mmc_reset_ops;
+	data->rcdev.of_node = pdev->dev.of_node;
+
+	ret = reset_controller_register(&data->rcdev);
+	if (ret)
+		goto err_rc_reg;
+
+	platform_set_drvdata(pdev, data);
+
+	return 0;
+
+err_rc_reg:
+	of_clk_del_provider(np);
+
+err_clk_provider:
+	for (i = 0; i < count; i++)
+		clk_unregister(clk_data->clks[i]);
+
+err_clk_register:
+	reset_control_assert(data->reset);
+
+	return ret;
+}
+
+static int sun9i_a80_mmc_config_clk_remove(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct sun9i_mmc_clk_data *data = platform_get_drvdata(pdev);
+	struct clk_onecell_data *clk_data = &data->clk_data;
+	int i;
+
+	reset_controller_unregister(&data->rcdev);
+	of_clk_del_provider(np);
+	for (i = 0; i < clk_data->clk_num; i++)
+		clk_unregister(clk_data->clks[i]);
+
+	reset_control_assert(data->reset);
+
+	return 0;
+}
+
+static const struct of_device_id sun9i_a80_mmc_config_clk_dt_ids[] = {
+	{ .compatible = "allwinner,sun9i-a80-mmc-config-clk" },
+	{ /* sentinel */ }
+};
+
+static struct platform_driver sun9i_a80_mmc_config_clk_driver = {
+	.driver = {
+		.name = "sun9i-a80-mmc-config-clk",
+		.of_match_table = sun9i_a80_mmc_config_clk_dt_ids,
+	},
+	.probe = sun9i_a80_mmc_config_clk_probe,
+	.remove = sun9i_a80_mmc_config_clk_remove,
+};
+module_platform_driver(sun9i_a80_mmc_config_clk_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner A80 MMC clock/reset Driver");
+MODULE_LICENSE("GPL v2");
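
The driver above exports one gate clock (bit 16) and one reset line (bit 18) per 32-bit word of its register range. A hypothetical consumer sketch using the standard clk and reset consumer APIs; the "output" and "ahb" identifiers are assumptions, not defined by this patch:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/reset.h>

    static int example_mmc_bring_up(struct device *dev)
    {
        struct clk *cfg_clk;
        struct reset_control *rst;
        int ret;

        cfg_clk = devm_clk_get(dev, "output");      /* con-id is an assumption */
        if (IS_ERR(cfg_clk))
            return PTR_ERR(cfg_clk);

        rst = devm_reset_control_get(dev, "ahb");   /* id is an assumption */
        if (IS_ERR(rst))
            return PTR_ERR(rst);

        ret = clk_prepare_enable(cfg_clk);          /* the bit-16 gate, if mapped here */
        if (ret)
            return ret;

        return reset_control_deassert(rst);         /* the bit-18 reset, if mapped here */
    }
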
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 1818f40..379324e 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -20,11 +20,221 @@
 #include <linux/of_address.h>
 #include <linux/reset-controller.h>
 #include <linux/spinlock.h>
+#include <linux/log2.h>
 
 #include "clk-factors.h"
 
 static DEFINE_SPINLOCK(clk_lock);
 
+/**
+ * sun6i_a31_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk
+ */
+
+#define SUN6I_AHB1_MAX_PARENTS		4
+#define SUN6I_AHB1_MUX_PARENT_PLL6	3
+#define SUN6I_AHB1_MUX_SHIFT		12
+/* un-shifted mask is what mux_clk expects */
+#define SUN6I_AHB1_MUX_MASK		0x3
+#define SUN6I_AHB1_MUX_GET_PARENT(reg)	((reg >> SUN6I_AHB1_MUX_SHIFT) & \
+					 SUN6I_AHB1_MUX_MASK)
+
+#define SUN6I_AHB1_DIV_SHIFT		4
+#define SUN6I_AHB1_DIV_MASK		(0x3 << SUN6I_AHB1_DIV_SHIFT)
+#define SUN6I_AHB1_DIV_GET(reg)		((reg & SUN6I_AHB1_DIV_MASK) >> \
+						SUN6I_AHB1_DIV_SHIFT)
+#define SUN6I_AHB1_DIV_SET(reg, div)	((reg & ~SUN6I_AHB1_DIV_MASK) | \
+						(div << SUN6I_AHB1_DIV_SHIFT))
+#define SUN6I_AHB1_PLL6_DIV_SHIFT	6
+#define SUN6I_AHB1_PLL6_DIV_MASK	(0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT)
+#define SUN6I_AHB1_PLL6_DIV_GET(reg)	((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \
+						SUN6I_AHB1_PLL6_DIV_SHIFT)
+#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \
+						(div << SUN6I_AHB1_PLL6_DIV_SHIFT))
+
+struct sun6i_ahb1_clk {
+	struct clk_hw hw;
+	void __iomem *reg;
+};
+
+#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw)
+
+static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
+	unsigned long rate;
+	u32 reg;
+
+	/* Fetch the register value */
+	reg = readl(ahb1->reg);
+
+	/* apply pre-divider first if parent is pll6 */
+	if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6)
+		parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1;
+
+	/* clk divider */
+	rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg);
+
+	return rate;
+}
+
+static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
+				 u8 parent, unsigned long parent_rate)
+{
+	u8 div, calcp, calcm = 1;
+
+	/*
+	 * clock can only divide, so we will never be able to achieve
+	 * frequencies higher than the parent frequency
+	 */
+	if (parent_rate && rate > parent_rate)
+		rate = parent_rate;
+
+	div = DIV_ROUND_UP(parent_rate, rate);
+
+	/* calculate pre-divider if parent is pll6 */
+	if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) {
+		if (div < 4)
+			calcp = 0;
+		else if (div / 2 < 4)
+			calcp = 1;
+		else if (div / 4 < 4)
+			calcp = 2;
+		else
+			calcp = 3;
+
+		calcm = DIV_ROUND_UP(div, 1 << calcp);
+	} else {
+		calcp = __roundup_pow_of_two(div);
+		calcp = calcp > 3 ? 3 : calcp;
+	}
+
+	/* we were asked to pass back divider values */
+	if (divp) {
+		*divp = calcp;
+		*pre_divp = calcm - 1;
+	}
+
+	return (parent_rate / calcm) >> calcp;
+}
+
+static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+					  unsigned long min_rate,
+					  unsigned long max_rate,
+					  unsigned long *best_parent_rate,
+					  struct clk_hw **best_parent_clk)
+{
+	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+	int i, num_parents;
+	unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
+
+	/* find the parent that can help provide the fastest rate <= rate */
+	num_parents = __clk_get_num_parents(clk);
+	for (i = 0; i < num_parents; i++) {
+		parent = clk_get_parent_by_index(clk, i);
+		if (!parent)
+			continue;
+		if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT)
+			parent_rate = __clk_round_rate(parent, rate);
+		else
+			parent_rate = __clk_get_rate(parent);
+
+		child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i,
+						  parent_rate);
+
+		if (child_rate <= rate && child_rate > best_child_rate) {
+			best_parent = parent;
+			best = parent_rate;
+			best_child_rate = child_rate;
+		}
+	}
+
+	if (best_parent)
+		*best_parent_clk = __clk_get_hw(best_parent);
+	*best_parent_rate = best;
+
+	return best_child_rate;
+}
+
+static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long parent_rate)
+{
+	struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
+	unsigned long flags;
+	u8 div, pre_div, parent;
+	u32 reg;
+
+	spin_lock_irqsave(&clk_lock, flags);
+
+	reg = readl(ahb1->reg);
+
+	/* need to know which parent is used to apply pre-divider */
+	parent = SUN6I_AHB1_MUX_GET_PARENT(reg);
+	sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate);
+
+	reg = SUN6I_AHB1_DIV_SET(reg, div);
+	reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div);
+	writel(reg, ahb1->reg);
+
+	spin_unlock_irqrestore(&clk_lock, flags);
+
+	return 0;
+}
+
+static const struct clk_ops sun6i_ahb1_clk_ops = {
+	.determine_rate	= sun6i_ahb1_clk_determine_rate,
+	.recalc_rate	= sun6i_ahb1_clk_recalc_rate,
+	.set_rate	= sun6i_ahb1_clk_set_rate,
+};
+
+static void __init sun6i_ahb1_clk_setup(struct device_node *node)
+{
+	struct clk *clk;
+	struct sun6i_ahb1_clk *ahb1;
+	struct clk_mux *mux;
+	const char *clk_name = node->name;
+	const char *parents[SUN6I_AHB1_MAX_PARENTS];
+	void __iomem *reg;
+	int i = 0;
+
+	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+
+	/* we have a mux, we will have >1 parents */
+	while (i < SUN6I_AHB1_MAX_PARENTS &&
+	       (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
+		i++;
+
+	of_property_read_string(node, "clock-output-names", &clk_name);
+
+	ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
+	if (!ahb1)
+		return;
+
+	mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+	if (!mux) {
+		kfree(ahb1);
+		return;
+	}
+
+	/* set up clock properties */
+	mux->reg = reg;
+	mux->shift = SUN6I_AHB1_MUX_SHIFT;
+	mux->mask = SUN6I_AHB1_MUX_MASK;
+	mux->lock = &clk_lock;
+	ahb1->reg = reg;
+
+	clk = clk_register_composite(NULL, clk_name, parents, i,
+				     &mux->hw, &clk_mux_ops,
+				     &ahb1->hw, &sun6i_ahb1_clk_ops,
+				     NULL, NULL, 0);
+
+	if (!IS_ERR(clk)) {
+		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+		clk_register_clkdev(clk, clk_name, NULL);
+	}
+}
+CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup);
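
To make the divider selection concrete, assume (for illustration only) PLL6 as the selected parent at 600 MHz and a 100 MHz request; sun6i_ahb1_clk_round() then computes

    div   = DIV_ROUND_UP(600000000, 100000000) = 6
    calcp = 1                        /* since 6 / 2 < 4 */
    calcm = DIV_ROUND_UP(6, 1 << 1) = 3
    rate  = (600000000 / 3) >> 1 = 100000000 Hz

and sun6i_ahb1_clk_set_rate() writes calcp into the AHB1 divider field and calcm - 1 into the PLL6 pre-divider field.
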
+
 /* Maximum number of parents our clocks have */
 #define SUNXI_MAX_PARENTS	5
 
@@ -355,43 +565,6 @@
 }
 
 /**
- * clk_sunxi_mmc_phase_control() - configures MMC clock phase control
- */
-
-void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output)
-{
-	#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
-	#define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
-
-	struct clk_hw *hw = __clk_get_hw(clk);
-	struct clk_composite *composite = to_clk_composite(hw);
-	struct clk_hw *rate_hw = composite->rate_hw;
-	struct clk_factors *factors = to_clk_factors(rate_hw);
-	unsigned long flags = 0;
-	u32 reg;
-
-	if (factors->lock)
-		spin_lock_irqsave(factors->lock, flags);
-
-	reg = readl(factors->reg);
-
-	/* set sample clock phase control */
-	reg &= ~(0x7 << 20);
-	reg |= ((sample & 0x7) << 20);
-
-	/* set output clock phase control */
-	reg &= ~(0x7 << 8);
-	reg |= ((output & 0x7) << 8);
-
-	writel(reg, factors->reg);
-
-	if (factors->lock)
-		spin_unlock_irqrestore(factors->lock, flags);
-}
-EXPORT_SYMBOL(clk_sunxi_mmc_phase_control);
-
-
-/**
  * sunxi_factors_clk_setup() - Setup function for factor clocks
  */
 
@@ -413,6 +586,7 @@
 	.kwidth = 2,
 	.mshift = 0,
 	.mwidth = 2,
+	.n_start = 1,
 };
 
 static struct clk_factors_config sun8i_a23_pll1_config = {
@@ -520,7 +694,16 @@
 static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
 						   const struct factors_data *data)
 {
-	return sunxi_factors_register(node, data, &clk_lock);
+	void __iomem *reg;
+
+	reg = of_iomap(node, 0);
+	if (!reg) {
+		pr_err("Could not get registers for factors-clk: %s\n",
+		       node->name);
+		return NULL;
+	}
+
+	return sunxi_factors_register(node, data, &clk_lock, reg);
 }
 
 
@@ -561,7 +744,7 @@
 	of_property_read_string(node, "clock-output-names", &clk_name);
 
 	clk = clk_register_mux(NULL, clk_name, parents, i,
-			       CLK_SET_RATE_NO_REPARENT, reg,
+			       CLK_SET_RATE_PARENT, reg,
 			       data->shift, SUNXI_MUX_GATE_WIDTH,
 			       0, &clk_lock);
 
@@ -1217,7 +1400,6 @@
 
 static const char *sun6i_critical_clocks[] __initdata = {
 	"cpu",
-	"ahb1_sdram",
 };
 
 static void __init sun6i_init_clocks(struct device_node *node)
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index f7dfb72..edb8358 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -15,3 +15,4 @@
 obj-$(CONFIG_ARCH_TEGRA_3x_SOC)         += clk-tegra30.o
 obj-$(CONFIG_ARCH_TEGRA_114_SOC)	+= clk-tegra114.o
 obj-$(CONFIG_ARCH_TEGRA_124_SOC)	+= clk-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_132_SOC)	+= clk-tegra124.o
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 0011d54..60738cc 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -64,10 +64,8 @@
 	tegra_clk_disp2,
 	tegra_clk_dp2,
 	tegra_clk_dpaux,
-	tegra_clk_dsia,
 	tegra_clk_dsialp,
 	tegra_clk_dsia_mux,
-	tegra_clk_dsib,
 	tegra_clk_dsiblp,
 	tegra_clk_dsib_mux,
 	tegra_clk_dtv,
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 9e899c18..d84ae49 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -28,7 +28,7 @@
 	const struct clk_ops *mux_ops = periph->mux_ops;
 	struct clk_hw *mux_hw = &periph->mux.hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	return mux_ops->get_parent(mux_hw);
 }
@@ -39,7 +39,7 @@
 	const struct clk_ops *mux_ops = periph->mux_ops;
 	struct clk_hw *mux_hw = &periph->mux.hw;
 
-	mux_hw->clk = hw->clk;
+	__clk_hw_set_clk(mux_hw, hw);
 
 	return mux_ops->set_parent(mux_hw, index);
 }
@@ -51,7 +51,7 @@
 	const struct clk_ops *div_ops = periph->div_ops;
 	struct clk_hw *div_hw = &periph->divider.hw;
 
-	div_hw->clk = hw->clk;
+	__clk_hw_set_clk(div_hw, hw);
 
 	return div_ops->recalc_rate(div_hw, parent_rate);
 }
@@ -63,7 +63,7 @@
 	const struct clk_ops *div_ops = periph->div_ops;
 	struct clk_hw *div_hw = &periph->divider.hw;
 
-	div_hw->clk = hw->clk;
+	__clk_hw_set_clk(div_hw, hw);
 
 	return div_ops->round_rate(div_hw, rate, prate);
 }
@@ -75,7 +75,7 @@
 	const struct clk_ops *div_ops = periph->div_ops;
 	struct clk_hw *div_hw = &periph->divider.hw;
 
-	div_hw->clk = hw->clk;
+	__clk_hw_set_clk(div_hw, hw);
 
 	return div_ops->set_rate(div_hw, rate, parent_rate);
 }
@@ -86,7 +86,7 @@
 	const struct clk_ops *gate_ops = periph->gate_ops;
 	struct clk_hw *gate_hw = &periph->gate.hw;
 
-	gate_hw->clk = hw->clk;
+	__clk_hw_set_clk(gate_hw, hw);
 
 	return gate_ops->is_enabled(gate_hw);
 }
@@ -97,7 +97,7 @@
 	const struct clk_ops *gate_ops = periph->gate_ops;
 	struct clk_hw *gate_hw = &periph->gate.hw;
 
-	gate_hw->clk = hw->clk;
+	__clk_hw_set_clk(gate_hw, hw);
 
 	return gate_ops->enable(gate_hw);
 }
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index c7c6d8f..bfef9ab 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -816,7 +816,9 @@
 	.enable = clk_plle_enable,
 };
 
-#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+	defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+	defined(CONFIG_ARCH_TEGRA_132_SOC)
 
 static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
 			   unsigned long parent_rate)
@@ -1505,7 +1507,9 @@
 	return clk;
 }
 
-#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+	defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+	defined(CONFIG_ARCH_TEGRA_132_SOC)
 static const struct clk_ops tegra_clk_pllxc_ops = {
 	.is_enabled = clk_pll_is_enabled,
 	.enable = clk_pll_iddq_enable,
@@ -1565,7 +1569,7 @@
 	parent = __clk_lookup(parent_name);
 	if (!parent) {
 		WARN(1, "parent clk %s of %s must be registered first\n",
-			name, parent_name);
+			parent_name, name);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1665,7 +1669,7 @@
 	parent = __clk_lookup(parent_name);
 	if (!parent) {
 		WARN(1, "parent clk %s of %s must be registered first\n",
-			name, parent_name);
+			parent_name, name);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1706,7 +1710,7 @@
 	parent = __clk_lookup(parent_name);
 	if (!parent) {
 		WARN(1, "parent clk %s of %s must be registered first\n",
-			name, parent_name);
+			parent_name, name);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -1802,7 +1806,7 @@
 }
 #endif
 
-#ifdef CONFIG_ARCH_TEGRA_124_SOC
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
 static const struct clk_ops tegra_clk_pllss_ops = {
 	.is_enabled = clk_pll_is_enabled,
 	.enable = clk_pll_iddq_enable,
@@ -1830,7 +1834,7 @@
 	parent = __clk_lookup(parent_name);
 	if (!parent) {
 		WARN(1, "parent clk %s of %s must be registered first\n",
-			name, parent_name);
+			parent_name, name);
 		return ERR_PTR(-EINVAL);
 	}
 
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 37f32c4..cef0727 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -434,10 +434,10 @@
 	MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda),
 	MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x),
 	MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir),
-	MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1),
-	MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2),
-	MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3),
-	MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4),
+	MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1),
+	MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2),
+	MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3),
+	MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4),
 	MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
 	MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
 	MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
@@ -470,10 +470,10 @@
 	MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
 	MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
 	MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
-	MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
-	MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
-	MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
-	MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
+	MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1_8),
+	MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2_8),
+	MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3_8),
+	MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4_8),
 	MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
 	MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
 	MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -537,8 +537,6 @@
 	GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
 	GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
 	GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
-	GATE("dsia", "dsia_mux", 48, 0, tegra_clk_dsia, 0),
-	GATE("dsib", "dsib_mux", 82, 0, tegra_clk_dsib, 0),
 	GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
 	GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
 	GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0),
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 0b03d2c..d076642 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -715,7 +715,6 @@
 	[tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true },
 	[tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true },
 	[tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true },
-	[tegra_clk_dsia] = { .dt_id = TEGRA114_CLK_DSIA, .present = true },
 	[tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true },
 	[tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true },
 	[tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true },
@@ -739,7 +738,6 @@
 	[tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true },
 	[tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true },
 	[tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true },
-	[tegra_clk_dsib] = { .dt_id = TEGRA114_CLK_DSIB, .present = true },
 	[tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true },
 	[tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true },
 	[tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true },
@@ -1224,6 +1222,14 @@
 			       clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
 	clks[TEGRA114_CLK_DSIB_MUX] = clk;
 
+	clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
+					     0, 48, periph_clk_enb_refcnt);
+	clks[TEGRA114_CLK_DSIA] = clk;
+
+	clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
+					     0, 82, periph_clk_enb_refcnt);
+	clks[TEGRA114_CLK_DSIB] = clk;
+
 	/* emc mux */
 	clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
 			       ARRAY_SIZE(mux_pllmcp_clkm),
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index f5f9bac..9a893f2 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2012-2014 NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -28,6 +28,14 @@
 #include "clk.h"
 #include "clk-id.h"
 
+/*
+ * TEGRA124_CAR_BANK_COUNT: the number of peripheral clock register
+ * banks present in the Tegra124/132 CAR IP block.  The banks are
+ * identified by single letters, e.g.: L, H, U, V, W, X.  See
+ * periph_regs[] in drivers/clk/tegra/clk.c
+ */
+#define TEGRA124_CAR_BANK_COUNT			6
+
 #define CLK_SOURCE_CSITE 0x1d4
 #define CLK_SOURCE_EMC 0x19c
 
@@ -128,7 +136,6 @@
 static unsigned long pll_ref_freq;
 
 static DEFINE_SPINLOCK(pll_d_lock);
-static DEFINE_SPINLOCK(pll_d2_lock);
 static DEFINE_SPINLOCK(pll_e_lock);
 static DEFINE_SPINLOCK(pll_re_lock);
 static DEFINE_SPINLOCK(pll_u_lock);
@@ -145,11 +152,6 @@
 	[12] = 260000000,
 };
 
-static const char *mux_plld_out0_plld2_out0[] = {
-	"pll_d_out0", "pll_d2_out0",
-};
-#define mux_plld_out0_plld2_out0_idx NULL
-
 static const char *mux_pllmcp_clkm[] = {
 	"pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
 };
@@ -783,7 +785,6 @@
 	[tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true },
 	[tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true },
 	[tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true },
-	[tegra_clk_dsia] = { .dt_id = TEGRA124_CLK_DSIA, .present = true },
 	[tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true },
 	[tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true },
 	[tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true },
@@ -809,7 +810,6 @@
 	[tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
 	[tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
 	[tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
-	[tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
 	[tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
 	[tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true },
 	[tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true },
@@ -949,8 +949,6 @@
 	[tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true },
 	[tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true },
 	[tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
-	[tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
-	[tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
 };
 
 static struct tegra_devclk devclks[] __initdata = {
@@ -1112,17 +1110,17 @@
 					1, 2);
 	clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
 
-	/* dsia mux */
-	clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
-			       ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
-			       clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
-	clks[TEGRA124_CLK_DSIA_MUX] = clk;
+	clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
+				clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
+	clks[TEGRA124_CLK_PLLD_DSI] = clk;
 
-	/* dsib mux */
-	clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
-			       ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
-			       clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
-	clks[TEGRA124_CLK_DSIB_MUX] = clk;
+	clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
+					     0, 48, periph_clk_enb_refcnt);
+	clks[TEGRA124_CLK_DSIA] = clk;
+
+	clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
+					     0, 82, periph_clk_enb_refcnt);
+	clks[TEGRA124_CLK_DSIB] = clk;
 
 	/* emc mux */
 	clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
@@ -1351,7 +1349,7 @@
 	{},
 };
 
-static struct tegra_clk_init_table init_table[] __initdata = {
+static struct tegra_clk_init_table common_init_table[] __initdata = {
 	{TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0},
 	{TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0},
 	{TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0},
@@ -1368,6 +1366,8 @@
 	{TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
 	{TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0},
 	{TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1},
+	{TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0},
+	{TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0},
 	{TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1},
 	{TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1},
 	{TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1},
@@ -1385,27 +1385,73 @@
 	{TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0},
 	{TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0},
 	{TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1},
-	{TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
 	{TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1},
 	{TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1},
 	{TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0},
-	{TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0},
 	/* This MUST be the last entry. */
 	{TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
 };
 
+static struct tegra_clk_init_table tegra124_init_table[] __initdata = {
+	{TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0},
+	{TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
+	/* This MUST be the last entry. */
+	{TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+/* Tegra132 requires the SOC_THERM clock to remain active */
+static struct tegra_clk_init_table tegra132_init_table[] __initdata = {
+	{TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 1},
+	/* This MUST be the last entry. */
+	{TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+/**
+ * tegra124_clock_apply_init_table - initialize clocks on Tegra124 SoCs
+ *
+ * Program an initial clock rate and enable or disable clocks needed
+ * by the rest of the kernel, for Tegra124 SoCs.  It is intended to be
+ * called by assigning a pointer to it to tegra_clk_apply_init_table -
+ * this will be called as an arch_initcall.  No return value.
+ */
 static void __init tegra124_clock_apply_init_table(void)
 {
-	tegra_init_from_table(init_table, clks, TEGRA124_CLK_CLK_MAX);
+	tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
+	tegra_init_from_table(tegra124_init_table, clks, TEGRA124_CLK_CLK_MAX);
 }
 
-static void __init tegra124_clock_init(struct device_node *np)
+/**
+ * tegra132_clock_apply_init_table - initialize clocks on Tegra132 SoCs
+ *
+ * Program an initial clock rate and enable or disable clocks needed
+ * by the rest of the kernel, for Tegra132 SoCs.  It is intended to be
+ * called by assigning a pointer to it to tegra_clk_apply_init_table -
+ * this will be called as an arch_initcall.  No return value.
+ */
+static void __init tegra132_clock_apply_init_table(void)
+{
+	tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
+	tegra_init_from_table(tegra132_init_table, clks, TEGRA124_CLK_CLK_MAX);
+}
+
+/**
+ * tegra124_132_clock_init_pre - clock initialization preamble for T124/T132
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most of the clocks controlled by the CAR IP block, along
+ * with a few clocks controlled by the PMC IP block.  Everything in
+ * this function should be common to Tegra124 and Tegra132.  XXX The
+ * PMC clock initialization should probably be moved to PMC-specific
+ * driver code.  No return value.
+ */
+static void __init tegra124_132_clock_init_pre(struct device_node *np)
 {
 	struct device_node *node;
+	u32 plld_base;
 
 	clk_base = of_iomap(np, 0);
 	if (!clk_base) {
-		pr_err("ioremap tegra124 CAR failed\n");
+		pr_err("ioremap tegra124/tegra132 CAR failed\n");
 		return;
 	}
 
@@ -1423,7 +1469,8 @@
 		return;
 	}
 
-	clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX, 6);
+	clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX,
+			      TEGRA124_CAR_BANK_COUNT);
 	if (!clks)
 		return;
 
@@ -1437,13 +1484,76 @@
 	tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params);
 	tegra_pmc_clk_init(pmc_base, tegra124_clks);
 
+	/* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
+	plld_base = clk_readl(clk_base + PLLD_BASE);
+	plld_base &= ~BIT(25);
+	clk_writel(plld_base, clk_base + PLLD_BASE);
+}
+
+/**
+ * tegra124_132_clock_init_post - clock initialization postamble for T124/T132
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register the remaining clocks, along with a few clocks controlled by the
+ * PMC IP block.  Everything in this function should be common to Tegra124
+ * and Tegra132.  This function must be called after
+ * tegra124_132_clock_init_pre(), otherwise clk_base and pmc_base will
+ * not be set.  No return value.
+ */
+static void __init tegra124_132_clock_init_post(struct device_node *np)
+{
 	tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
-					&pll_x_params);
+				  &pll_x_params);
 	tegra_add_of_provider(np);
 	tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
 
-	tegra_clk_apply_init_table = tegra124_clock_apply_init_table;
-
 	tegra_cpu_car_ops = &tegra124_cpu_car_ops;
 }
+
+/**
+ * tegra124_clock_init - Tegra124-specific clock initialization
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most SoC clocks for the Tegra124 system-on-chip.  Most of
+ * this code is shared between the Tegra124 and Tegra132 SoCs,
+ * although some of the initial clock settings and CPU clocks differ.
+ * Intended to be called by the OF init code when a DT node with the
+ * "nvidia,tegra124-car" string is encountered, and declared with
+ * CLK_OF_DECLARE.  No return value.
+ */
+static void __init tegra124_clock_init(struct device_node *np)
+{
+	tegra124_132_clock_init_pre(np);
+	tegra_clk_apply_init_table = tegra124_clock_apply_init_table;
+	tegra124_132_clock_init_post(np);
+}
+
+/**
+ * tegra132_clock_init - Tegra132-specific clock initialization
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most SoC clocks for the Tegra132 system-on-chip.  Most of
+ * this code is shared between the Tegra124 and Tegra132 SoCs,
+ * although some of the initial clock settings and CPU clocks differ.
+ * Intended to be called by the OF init code when a DT node with the
+ * "nvidia,tegra132-car" string is encountered, and declared with
+ * CLK_OF_DECLARE.  No return value.
+ */
+static void __init tegra132_clock_init(struct device_node *np)
+{
+	tegra124_132_clock_init_pre(np);
+
+	/*
+	 * On Tegra132, these clocks are controlled by the
+	 * CLUSTER_clocks IP block, located in the CPU complex
+	 */
+	tegra124_clks[tegra_clk_cclk_g].present = false;
+	tegra124_clks[tegra_clk_cclk_lp].present = false;
+	tegra124_clks[tegra_clk_pll_x].present = false;
+	tegra124_clks[tegra_clk_pll_x_out0].present = false;
+
+	tegra_clk_apply_init_table = tegra132_clock_apply_init_table;
+	tegra124_132_clock_init_post(np);
+}
 CLK_OF_DECLARE(tegra124, "nvidia,tegra124-car", tegra124_clock_init);
+CLK_OF_DECLARE(tegra132, "nvidia,tegra132-car", tegra132_clock_init);
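
Each init table entry above is a (clock, parent, rate, enable) tuple consumed by tegra_init_from_table(); the field meaning is assumed from struct tegra_clk_init_table. For example:

    /* reparent SOC_THERM to PLL_P, set 51 MHz, and keep it running */
    {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 1},

The SOC_THERM entries in the two SoC-specific tables differ only in that final field, which is why Tegra132 carries its own small table alongside common_init_table.
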
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 97dc859..9ddb754 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -302,10 +302,13 @@
 
 tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
 
-void __init tegra_clocks_apply_init_table(void)
+static int __init tegra_clocks_apply_init_table(void)
 {
 	if (!tegra_clk_apply_init_table)
-		return;
+		return 0;
 
 	tegra_clk_apply_init_table();
+
+	return 0;
 }
+arch_initcall(tegra_clocks_apply_init_table);
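
tegra_clocks_apply_init_table() previously had external linkage so code elsewhere could call it at the right moment; making it static and registering it with arch_initcall() lets the clock code schedule itself during boot instead. The general shape of the mechanism, as a generic sketch:

    #include <linux/init.h>

    static int __init example_late_clk_setup(void)
    {
        /* run automatically at arch_initcall time, after (post)core initcalls */
        return 0;
    }
    arch_initcall(example_late_clk_setup);
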
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index ed4d0aa..105ffd0 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -1,13 +1,17 @@
-ifneq ($(CONFIG_OF),)
 obj-y					+= clk.o autoidle.o clockdomain.o
 clk-common				= dpll.o composite.o divider.o gate.o \
 					  fixed-factor.o mux.o apll.o
 obj-$(CONFIG_SOC_AM33XX)		+= $(clk-common) clk-33xx.o
+obj-$(CONFIG_SOC_TI81XX)		+= $(clk-common) fapll.o clk-816x.o
 obj-$(CONFIG_ARCH_OMAP2)		+= $(clk-common) interface.o clk-2xxx.o
-obj-$(CONFIG_ARCH_OMAP3)		+= $(clk-common) interface.o clk-3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= $(clk-common) interface.o \
+					   clk-3xxx.o
 obj-$(CONFIG_ARCH_OMAP4)		+= $(clk-common) clk-44xx.o
 obj-$(CONFIG_SOC_OMAP5)			+= $(clk-common) clk-54xx.o
 obj-$(CONFIG_SOC_DRA7XX)		+= $(clk-common) clk-7xx.o \
 					   clk-dra7-atl.o
 obj-$(CONFIG_SOC_AM43XX)		+= $(clk-common) clk-43xx.o
+
+ifdef CONFIG_ATAGS
+obj-$(CONFIG_ARCH_OMAP3)                += clk-3xxx-legacy.o
 endif
diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c
new file mode 100644
index 0000000..e0732a4
--- /dev/null
+++ b/drivers/clk/ti/clk-3xxx-legacy.c
@@ -0,0 +1,4653 @@
+/*
+ * OMAP3 Legacy clock data
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+static struct ti_clk_fixed virt_12m_ck_data = {
+	.frequency = 12000000,
+};
+
+static struct ti_clk virt_12m_ck = {
+	.name = "virt_12m_ck",
+	.type = TI_CLK_FIXED,
+	.data = &virt_12m_ck_data,
+};
+
+static struct ti_clk_fixed virt_13m_ck_data = {
+	.frequency = 13000000,
+};
+
+static struct ti_clk virt_13m_ck = {
+	.name = "virt_13m_ck",
+	.type = TI_CLK_FIXED,
+	.data = &virt_13m_ck_data,
+};
+
+static struct ti_clk_fixed virt_19200000_ck_data = {
+	.frequency = 19200000,
+};
+
+static struct ti_clk virt_19200000_ck = {
+	.name = "virt_19200000_ck",
+	.type = TI_CLK_FIXED,
+	.data = &virt_19200000_ck_data,
+};
+
+static struct ti_clk_fixed virt_26000000_ck_data = {
+	.frequency = 26000000,
+};
+
+static struct ti_clk virt_26000000_ck = {
+	.name = "virt_26000000_ck",
+	.type = TI_CLK_FIXED,
+	.data = &virt_26000000_ck_data,
+};
+
+static struct ti_clk_fixed virt_38_4m_ck_data = {
+	.frequency = 38400000,
+};
+
+static struct ti_clk virt_38_4m_ck = {
+	.name = "virt_38_4m_ck",
+	.type = TI_CLK_FIXED,
+	.data = &virt_38_4m_ck_data,
+};
+
+static struct ti_clk_fixed virt_16_8m_ck_data = {
+	.frequency = 16800000,
+};
+
+static struct ti_clk virt_16_8m_ck = {
+	.name = "virt_16_8m_ck",
+	.type = TI_CLK_FIXED,
+	.data = &virt_16_8m_ck_data,
+};
+
+static const char *osc_sys_ck_parents[] = {
+	"virt_12m_ck",
+	"virt_13m_ck",
+	"virt_19200000_ck",
+	"virt_26000000_ck",
+	"virt_38_4m_ck",
+	"virt_16_8m_ck",
+};
+
+static struct ti_clk_mux osc_sys_ck_data = {
+	.num_parents = ARRAY_SIZE(osc_sys_ck_parents),
+	.reg = 0xd40,
+	.module = TI_CLKM_PRM,
+	.parents = osc_sys_ck_parents,
+};
+
+static struct ti_clk osc_sys_ck = {
+	.name = "osc_sys_ck",
+	.type = TI_CLK_MUX,
+	.data = &osc_sys_ck_data,
+};
+
+static struct ti_clk_divider sys_ck_data = {
+	.parent = "osc_sys_ck",
+	.bit_shift = 6,
+	.max_div = 3,
+	.reg = 0x1270,
+	.module = TI_CLKM_PRM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk sys_ck = {
+	.name = "sys_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &sys_ck_data,
+};
+
+static const char *dpll3_ck_parents[] = {
+	"sys_ck",
+	"sys_ck",
+};
+
+static struct ti_clk_dpll dpll3_ck_data = {
+	.num_parents = ARRAY_SIZE(dpll3_ck_parents),
+	.control_reg = 0xd00,
+	.idlest_reg = 0xd20,
+	.mult_div1_reg = 0xd40,
+	.autoidle_reg = 0xd30,
+	.module = TI_CLKM_CM,
+	.parents = dpll3_ck_parents,
+	.flags = CLKF_CORE,
+	.freqsel_mask = 0xf0,
+	.div1_mask = 0x7f00,
+	.idlest_mask = 0x1,
+	.auto_recal_bit = 0x3,
+	.max_divider = 0x80,
+	.min_divider = 0x1,
+	.recal_en_bit = 0x5,
+	.max_multiplier = 0x7ff,
+	.enable_mask = 0x7,
+	.mult_mask = 0x7ff0000,
+	.recal_st_bit = 0x5,
+	.autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll3_ck = {
+	.name = "dpll3_ck",
+	.clkdm_name = "dpll3_clkdm",
+	.type = TI_CLK_DPLL,
+	.data = &dpll3_ck_data,
+};
+
+static struct ti_clk_divider dpll3_m2_ck_data = {
+	.parent = "dpll3_ck",
+	.bit_shift = 27,
+	.max_div = 31,
+	.reg = 0xd40,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll3_m2_ck = {
+	.name = "dpll3_m2_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll3_m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor core_ck_data = {
+	.parent = "dpll3_m2_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk core_ck = {
+	.name = "core_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_ck_data,
+};
+
+static struct ti_clk_divider l3_ick_data = {
+	.parent = "core_ck",
+	.max_div = 3,
+	.reg = 0xa40,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk l3_ick = {
+	.name = "l3_ick",
+	.type = TI_CLK_DIVIDER,
+	.data = &l3_ick_data,
+};
+
+static struct ti_clk_fixed_factor security_l3_ick_data = {
+	.parent = "l3_ick",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk security_l3_ick = {
+	.name = "security_l3_ick",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &security_l3_ick_data,
+};
+
+static struct ti_clk_fixed_factor wkup_l4_ick_data = {
+	.parent = "sys_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk wkup_l4_ick = {
+	.name = "wkup_l4_ick",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &wkup_l4_ick_data,
+};
+
+static struct ti_clk_gate usim_ick_data = {
+	.parent = "wkup_l4_ick",
+	.bit_shift = 9,
+	.reg = 0xc10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usim_ick = {
+	.name = "usim_ick",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &usim_ick_data,
+};
+
+static struct ti_clk_gate dss2_alwon_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 1,
+	.reg = 0xe00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss2_alwon_fck = {
+	.name = "dss2_alwon_fck",
+	.clkdm_name = "dss_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &dss2_alwon_fck_data,
+};
+
+static struct ti_clk_divider l4_ick_data = {
+	.parent = "l3_ick",
+	.bit_shift = 2,
+	.max_div = 3,
+	.reg = 0xa40,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk l4_ick = {
+	.name = "l4_ick",
+	.type = TI_CLK_DIVIDER,
+	.data = &l4_ick_data,
+};
+
+static struct ti_clk_fixed_factor core_l4_ick_data = {
+	.parent = "l4_ick",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk core_l4_ick = {
+	.name = "core_l4_ick",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_l4_ick_data,
+};
+
+static struct ti_clk_gate mmchs2_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 25,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs2_ick = {
+	.name = "mmchs2_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mmchs2_ick_data,
+};
+
+static const char *dpll4_ck_parents[] = {
+	"sys_ck",
+	"sys_ck",
+};
+
+static struct ti_clk_dpll dpll4_ck_data = {
+	.num_parents = ARRAY_SIZE(dpll4_ck_parents),
+	.control_reg = 0xd00,
+	.idlest_reg = 0xd20,
+	.mult_div1_reg = 0xd44,
+	.autoidle_reg = 0xd30,
+	.module = TI_CLKM_CM,
+	.parents = dpll4_ck_parents,
+	.flags = CLKF_PER,
+	.freqsel_mask = 0xf00000,
+	.modes = 0x82,
+	.div1_mask = 0x7f,
+	.idlest_mask = 0x2,
+	.auto_recal_bit = 0x13,
+	.max_divider = 0x80,
+	.min_divider = 0x1,
+	.recal_en_bit = 0x6,
+	.max_multiplier = 0x7ff,
+	.enable_mask = 0x70000,
+	.mult_mask = 0x7ff00,
+	.recal_st_bit = 0x6,
+	.autoidle_mask = 0x38,
+};
+
+static struct ti_clk dpll4_ck = {
+	.name = "dpll4_ck",
+	.clkdm_name = "dpll4_clkdm",
+	.type = TI_CLK_DPLL,
+	.data = &dpll4_ck_data,
+};
+
+static struct ti_clk_divider dpll4_m2_ck_data = {
+	.parent = "dpll4_ck",
+	.max_div = 63,
+	.reg = 0xd48,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m2_ck = {
+	.name = "dpll4_m2_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll4_m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m2x2_mul_ck_data = {
+	.parent = "dpll4_m2_ck",
+	.div = 1,
+	.mult = 2,
+};
+
+static struct ti_clk dpll4_m2x2_mul_ck = {
+	.name = "dpll4_m2x2_mul_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll4_m2x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m2x2_ck_data = {
+	.parent = "dpll4_m2x2_mul_ck",
+	.bit_shift = 0x1b,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m2x2_ck = {
+	.name = "dpll4_m2x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll4_m2x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_alwon_fck_data = {
+	.parent = "dpll4_m2x2_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk omap_96m_alwon_fck = {
+	.name = "omap_96m_alwon_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &omap_96m_alwon_fck_data,
+};
+
+static struct ti_clk_fixed_factor cm_96m_fck_data = {
+	.parent = "omap_96m_alwon_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk cm_96m_fck = {
+	.name = "cm_96m_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &cm_96m_fck_data,
+};
+
+static const char *omap_96m_fck_parents[] = {
+	"cm_96m_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux omap_96m_fck_data = {
+	.bit_shift = 6,
+	.num_parents = ARRAY_SIZE(omap_96m_fck_parents),
+	.reg = 0xd40,
+	.module = TI_CLKM_CM,
+	.parents = omap_96m_fck_parents,
+};
+
+static struct ti_clk omap_96m_fck = {
+	.name = "omap_96m_fck",
+	.type = TI_CLK_MUX,
+	.data = &omap_96m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_96m_fck_data = {
+	.parent = "omap_96m_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk core_96m_fck = {
+	.name = "core_96m_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_96m_fck_data,
+};
+
+static struct ti_clk_gate mspro_fck_data = {
+	.parent = "core_96m_fck",
+	.bit_shift = 23,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mspro_fck = {
+	.name = "mspro_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mspro_fck_data,
+};
+
+static struct ti_clk_gate dss_ick_3430es2_data = {
+	.parent = "l4_ick",
+	.bit_shift = 0,
+	.reg = 0xe10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk dss_ick_3430es2 = {
+	.name = "dss_ick",
+	.clkdm_name = "dss_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &dss_ick_3430es2_data,
+};
+
+static struct ti_clk_gate uart4_ick_am35xx_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 23,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart4_ick_am35xx = {
+	.name = "uart4_ick_am35xx",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &uart4_ick_am35xx_data,
+};
+
+static struct ti_clk_fixed_factor security_l4_ick2_data = {
+	.parent = "l4_ick",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk security_l4_ick2 = {
+	.name = "security_l4_ick2",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &security_l4_ick2_data,
+};
+
+static struct ti_clk_gate aes1_ick_data = {
+	.parent = "security_l4_ick2",
+	.bit_shift = 3,
+	.reg = 0xa14,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk aes1_ick = {
+	.name = "aes1_ick",
+	.type = TI_CLK_GATE,
+	.data = &aes1_ick_data,
+};
+
+static const char *dpll5_ck_parents[] = {
+	"sys_ck",
+	"sys_ck",
+};
+
+static struct ti_clk_dpll dpll5_ck_data = {
+	.num_parents = ARRAY_SIZE(dpll5_ck_parents),
+	.control_reg = 0xd04,
+	.idlest_reg = 0xd24,
+	.mult_div1_reg = 0xd4c,
+	.autoidle_reg = 0xd34,
+	.module = TI_CLKM_CM,
+	.parents = dpll5_ck_parents,
+	.freqsel_mask = 0xf0,
+	.modes = 0x82,
+	.div1_mask = 0x7f,
+	.idlest_mask = 0x1,
+	.auto_recal_bit = 0x3,
+	.max_divider = 0x80,
+	.min_divider = 0x1,
+	.recal_en_bit = 0x19,
+	.max_multiplier = 0x7ff,
+	.enable_mask = 0x7,
+	.mult_mask = 0x7ff00,
+	.recal_st_bit = 0x19,
+	.autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll5_ck = {
+	.name = "dpll5_ck",
+	.clkdm_name = "dpll5_clkdm",
+	.type = TI_CLK_DPLL,
+	.data = &dpll5_ck_data,
+};
+
+static struct ti_clk_divider dpll5_m2_ck_data = {
+	.parent = "dpll5_ck",
+	.max_div = 31,
+	.reg = 0xd50,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll5_m2_ck = {
+	.name = "dpll5_m2_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll5_m2_ck_data,
+};
+
+static struct ti_clk_gate usbhost_120m_fck_data = {
+	.parent = "dpll5_m2_ck",
+	.bit_shift = 1,
+	.reg = 0x1400,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk usbhost_120m_fck = {
+	.name = "usbhost_120m_fck",
+	.clkdm_name = "usbhost_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &usbhost_120m_fck_data,
+};
+
+static struct ti_clk_fixed_factor cm_96m_d2_fck_data = {
+	.parent = "cm_96m_fck",
+	.div = 2,
+	.mult = 1,
+};
+
+static struct ti_clk cm_96m_d2_fck = {
+	.name = "cm_96m_d2_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &cm_96m_d2_fck_data,
+};
+
+static struct ti_clk_fixed sys_altclk_data = {
+	.frequency = 0x0,
+};
+
+static struct ti_clk sys_altclk = {
+	.name = "sys_altclk",
+	.type = TI_CLK_FIXED,
+	.data = &sys_altclk_data,
+};
+
+static const char *omap_48m_fck_parents[] = {
+	"cm_96m_d2_fck",
+	"sys_altclk",
+};
+
+static struct ti_clk_mux omap_48m_fck_data = {
+	.bit_shift = 3,
+	.num_parents = ARRAY_SIZE(omap_48m_fck_parents),
+	.reg = 0xd40,
+	.module = TI_CLKM_CM,
+	.parents = omap_48m_fck_parents,
+};
+
+static struct ti_clk omap_48m_fck = {
+	.name = "omap_48m_fck",
+	.type = TI_CLK_MUX,
+	.data = &omap_48m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_48m_fck_data = {
+	.parent = "omap_48m_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk core_48m_fck = {
+	.name = "core_48m_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_48m_fck_data,
+};
+
+static struct ti_clk_fixed mcbsp_clks_data = {
+	.frequency = 0x0,
+};
+
+static struct ti_clk mcbsp_clks = {
+	.name = "mcbsp_clks",
+	.type = TI_CLK_FIXED,
+	.data = &mcbsp_clks_data,
+};
+
+static struct ti_clk_gate mcbsp2_gate_fck_data = {
+	.parent = "mcbsp_clks",
+	.bit_shift = 0,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor per_96m_fck_data = {
+	.parent = "omap_96m_alwon_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk per_96m_fck = {
+	.name = "per_96m_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &per_96m_fck_data,
+};
+
+static const char *mcbsp2_mux_fck_parents[] = {
+	"per_96m_fck",
+	"mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp2_mux_fck_data = {
+	.bit_shift = 6,
+	.num_parents = ARRAY_SIZE(mcbsp2_mux_fck_parents),
+	.reg = 0x274,
+	.module = TI_CLKM_SCRM,
+	.parents = mcbsp2_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp2_fck_data = {
+	.mux = &mcbsp2_mux_fck_data,
+	.gate = &mcbsp2_gate_fck_data,
+};
+
+static struct ti_clk mcbsp2_fck = {
+	.name = "mcbsp2_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &mcbsp2_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_m2x2_ck_data = {
+	.parent = "dpll3_m2_ck",
+	.div = 1,
+	.mult = 2,
+};
+
+static struct ti_clk dpll3_m2x2_ck = {
+	.name = "dpll3_m2x2_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll3_m2x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_fck_data = {
+	.parent = "dpll3_m2x2_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk corex2_fck = {
+	.name = "corex2_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &corex2_fck_data,
+};
+
+static struct ti_clk_gate ssi_ssr_gate_fck_3430es1_data = {
+	.parent = "corex2_fck",
+	.bit_shift = 0,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_NO_WAIT,
+};
+
+static int ssi_ssr_div_fck_3430es1_divs[] = {
+	0,
+	1,
+	2,
+	3,
+	4,
+	0,
+	6,
+	0,
+	8,
+};
+
+static struct ti_clk_divider ssi_ssr_div_fck_3430es1_data = {
+	.num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es1_divs),
+	.parent = "corex2_fck",
+	.bit_shift = 8,
+	.dividers = ssi_ssr_div_fck_3430es1_divs,
+	.reg = 0xa40,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite ssi_ssr_fck_3430es1_data = {
+	.gate = &ssi_ssr_gate_fck_3430es1_data,
+	.divider = &ssi_ssr_div_fck_3430es1_data,
+};
+
+static struct ti_clk ssi_ssr_fck_3430es1 = {
+	.name = "ssi_ssr_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &ssi_ssr_fck_3430es1_data,
+};
+
+static struct ti_clk_fixed_factor ssi_sst_fck_3430es1_data = {
+	.parent = "ssi_ssr_fck",
+	.div = 2,
+	.mult = 1,
+};
+
+static struct ti_clk ssi_sst_fck_3430es1 = {
+	.name = "ssi_sst_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &ssi_sst_fck_3430es1_data,
+};
+
+static struct ti_clk_fixed omap_32k_fck_data = {
+	.frequency = 32768,
+};
+
+static struct ti_clk omap_32k_fck = {
+	.name = "omap_32k_fck",
+	.type = TI_CLK_FIXED,
+	.data = &omap_32k_fck_data,
+};
+
+static struct ti_clk_fixed_factor per_32k_alwon_fck_data = {
+	.parent = "omap_32k_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk per_32k_alwon_fck = {
+	.name = "per_32k_alwon_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &per_32k_alwon_fck_data,
+};
+
+static struct ti_clk_gate gpio5_dbck_data = {
+	.parent = "per_32k_alwon_fck",
+	.bit_shift = 16,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio5_dbck = {
+	.name = "gpio5_dbck",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio5_dbck_data,
+};
+
+static struct ti_clk_gate gpt1_ick_data = {
+	.parent = "wkup_l4_ick",
+	.bit_shift = 0,
+	.reg = 0xc10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt1_ick = {
+	.name = "gpt1_ick",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt1_ick_data,
+};
+
+static struct ti_clk_gate mcspi3_fck_data = {
+	.parent = "core_48m_fck",
+	.bit_shift = 20,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi3_fck = {
+	.name = "mcspi3_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcspi3_fck_data,
+};
+
+static struct ti_clk_gate gpt2_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 3,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static const char *gpt2_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt2_mux_fck_data = {
+	.num_parents = ARRAY_SIZE(gpt2_mux_fck_parents),
+	.reg = 0x1040,
+	.module = TI_CLKM_CM,
+	.parents = gpt2_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt2_fck_data = {
+	.mux = &gpt2_mux_fck_data,
+	.gate = &gpt2_gate_fck_data,
+};
+
+static struct ti_clk gpt2_fck = {
+	.name = "gpt2_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt2_fck_data,
+};
+
+static struct ti_clk_gate gpt10_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 11,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt10_ick = {
+	.name = "gpt10_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt10_ick_data,
+};
+
+static struct ti_clk_gate uart2_fck_data = {
+	.parent = "core_48m_fck",
+	.bit_shift = 14,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart2_fck = {
+	.name = "uart2_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &uart2_fck_data,
+};
+
+static struct ti_clk_fixed_factor sr_l4_ick_data = {
+	.parent = "l4_ick",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk sr_l4_ick = {
+	.name = "sr_l4_ick",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &sr_l4_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d8_fck_data = {
+	.parent = "omap_96m_fck",
+	.div = 8,
+	.mult = 1,
+};
+
+static struct ti_clk omap_96m_d8_fck = {
+	.name = "omap_96m_d8_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &omap_96m_d8_fck_data,
+};
+
+static struct ti_clk_divider dpll4_m5_ck_data = {
+	.parent = "dpll4_ck",
+	.max_div = 63,
+	.reg = 0xf40,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m5_ck = {
+	.name = "dpll4_m5_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll4_m5_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m5x2_mul_ck_data = {
+	.parent = "dpll4_m5_ck",
+	.div = 1,
+	.mult = 2,
+	.flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dpll4_m5x2_mul_ck = {
+	.name = "dpll4_m5x2_mul_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll4_m5x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m5x2_ck_data = {
+	.parent = "dpll4_m5x2_mul_ck",
+	.bit_shift = 0x1e,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m5x2_ck = {
+	.name = "dpll4_m5x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll4_m5x2_ck_data,
+};
+
+static struct ti_clk_gate cam_mclk_data = {
+	.parent = "dpll4_m5x2_ck",
+	.bit_shift = 0,
+	.reg = 0xf00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk cam_mclk = {
+	.name = "cam_mclk",
+	.type = TI_CLK_GATE,
+	.data = &cam_mclk_data,
+};
+
+static struct ti_clk_gate mcbsp3_gate_fck_data = {
+	.parent = "mcbsp_clks",
+	.bit_shift = 1,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static const char *mcbsp3_mux_fck_parents[] = {
+	"per_96m_fck",
+	"mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp3_mux_fck_data = {
+	.num_parents = ARRAY_SIZE(mcbsp3_mux_fck_parents),
+	.reg = 0x2d8,
+	.module = TI_CLKM_SCRM,
+	.parents = mcbsp3_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp3_fck_data = {
+	.mux = &mcbsp3_mux_fck_data,
+	.gate = &mcbsp3_gate_fck_data,
+};
+
+static struct ti_clk mcbsp3_fck = {
+	.name = "mcbsp3_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &mcbsp3_fck_data,
+};
+
+static struct ti_clk_gate csi2_96m_fck_data = {
+	.parent = "core_96m_fck",
+	.bit_shift = 1,
+	.reg = 0xf00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk csi2_96m_fck = {
+	.name = "csi2_96m_fck",
+	.clkdm_name = "cam_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &csi2_96m_fck_data,
+};
+
+static struct ti_clk_gate gpt9_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 10,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static const char *gpt9_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt9_mux_fck_data = {
+	.bit_shift = 7,
+	.num_parents = ARRAY_SIZE(gpt9_mux_fck_parents),
+	.reg = 0x1040,
+	.module = TI_CLKM_CM,
+	.parents = gpt9_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt9_fck_data = {
+	.mux = &gpt9_mux_fck_data,
+	.gate = &gpt9_gate_fck_data,
+};
+
+static struct ti_clk gpt9_fck = {
+	.name = "gpt9_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt9_fck_data,
+};
+
+static struct ti_clk_divider dpll3_m3_ck_data = {
+	.parent = "dpll3_ck",
+	.bit_shift = 16,
+	.max_div = 31,
+	.reg = 0x1140,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll3_m3_ck = {
+	.name = "dpll3_m3_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll3_m3_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_m3x2_mul_ck_data = {
+	.parent = "dpll3_m3_ck",
+	.div = 1,
+	.mult = 2,
+};
+
+static struct ti_clk dpll3_m3x2_mul_ck = {
+	.name = "dpll3_m3x2_mul_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll3_m3x2_mul_ck_data,
+};
+
+static struct ti_clk_gate sr2_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 7,
+	.reg = 0xc00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk sr2_fck = {
+	.name = "sr2_fck",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &sr2_fck_data,
+};
+
+static struct ti_clk_fixed pclk_ck_data = {
+	.frequency = 27000000,
+};
+
+static struct ti_clk pclk_ck = {
+	.name = "pclk_ck",
+	.type = TI_CLK_FIXED,
+	.data = &pclk_ck_data,
+};
+
+static struct ti_clk_gate wdt2_ick_data = {
+	.parent = "wkup_l4_ick",
+	.bit_shift = 5,
+	.reg = 0xc10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt2_ick = {
+	.name = "wdt2_ick",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &wdt2_ick_data,
+};
+
+static struct ti_clk_fixed_factor core_l3_ick_data = {
+	.parent = "l3_ick",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk core_l3_ick = {
+	.name = "core_l3_ick",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_l3_ick_data,
+};
+
+static struct ti_clk_gate mcspi4_fck_data = {
+	.parent = "core_48m_fck",
+	.bit_shift = 21,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi4_fck = {
+	.name = "mcspi4_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcspi4_fck_data,
+};
+
+static struct ti_clk_fixed_factor per_48m_fck_data = {
+	.parent = "omap_48m_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk per_48m_fck = {
+	.name = "per_48m_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &per_48m_fck_data,
+};
+
+static struct ti_clk_gate uart4_fck_data = {
+	.parent = "per_48m_fck",
+	.bit_shift = 18,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart4_fck = {
+	.name = "uart4_fck",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &uart4_fck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d10_fck_data = {
+	.parent = "omap_96m_fck",
+	.div = 10,
+	.mult = 1,
+};
+
+static struct ti_clk omap_96m_d10_fck = {
+	.name = "omap_96m_d10_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &omap_96m_d10_fck_data,
+};
+
+static struct ti_clk_gate usim_gate_fck_data = {
+	.parent = "omap_96m_fck",
+	.bit_shift = 9,
+	.reg = 0xc00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor per_l4_ick_data = {
+	.parent = "l4_ick",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk per_l4_ick = {
+	.name = "per_l4_ick",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &per_l4_ick_data,
+};
+
+static struct ti_clk_gate gpt5_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 6,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt5_ick = {
+	.name = "gpt5_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt5_ick_data,
+};
+
+static struct ti_clk_gate mcspi2_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 19,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi2_ick = {
+	.name = "mcspi2_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcspi2_ick_data,
+};
+
+static struct ti_clk_fixed_factor ssi_l4_ick_data = {
+	.parent = "l4_ick",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk ssi_l4_ick = {
+	.name = "ssi_l4_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &ssi_l4_ick_data,
+};
+
+static struct ti_clk_gate ssi_ick_3430es1_data = {
+	.parent = "ssi_l4_ick",
+	.bit_shift = 0,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk ssi_ick_3430es1 = {
+	.name = "ssi_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &ssi_ick_3430es1_data,
+};
+
+static struct ti_clk_gate i2c2_fck_data = {
+	.parent = "core_96m_fck",
+	.bit_shift = 16,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c2_fck = {
+	.name = "i2c2_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &i2c2_fck_data,
+};
+
+static struct ti_clk_divider dpll1_fck_data = {
+	.parent = "core_ck",
+	.bit_shift = 19,
+	.max_div = 7,
+	.reg = 0x940,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll1_fck = {
+	.name = "dpll1_fck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll1_fck_data,
+};
+
+static const char *dpll1_ck_parents[] = {
+	"sys_ck",
+	"dpll1_fck",
+};
+
+static struct ti_clk_dpll dpll1_ck_data = {
+	.num_parents = ARRAY_SIZE(dpll1_ck_parents),
+	.control_reg = 0x904,
+	.idlest_reg = 0x924,
+	.mult_div1_reg = 0x940,
+	.autoidle_reg = 0x934,
+	.module = TI_CLKM_CM,
+	.parents = dpll1_ck_parents,
+	.freqsel_mask = 0xf0,
+	.modes = 0xa0,
+	.div1_mask = 0x7f,
+	.idlest_mask = 0x1,
+	.auto_recal_bit = 0x3,
+	.max_divider = 0x80,
+	.min_divider = 0x1,
+	.recal_en_bit = 0x7,
+	.max_multiplier = 0x7ff,
+	.enable_mask = 0x7,
+	.mult_mask = 0x7ff00,
+	.recal_st_bit = 0x7,
+	.autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll1_ck = {
+	.name = "dpll1_ck",
+	.clkdm_name = "dpll1_clkdm",
+	.type = TI_CLK_DPLL,
+	.data = &dpll1_ck_data,
+};
+
+static struct ti_clk_fixed secure_32k_fck_data = {
+	.frequency = 32768,
+};
+
+static struct ti_clk secure_32k_fck = {
+	.name = "secure_32k_fck",
+	.type = TI_CLK_FIXED,
+	.data = &secure_32k_fck_data,
+};
+
+static struct ti_clk_gate gpio5_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 16,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio5_ick = {
+	.name = "gpio5_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio5_ick_data,
+};
+
+static struct ti_clk_divider dpll4_m4_ck_data = {
+	.parent = "dpll4_ck",
+	.max_div = 32,
+	.reg = 0xe40,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m4_ck = {
+	.name = "dpll4_m4_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll4_m4_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m4x2_mul_ck_data = {
+	.parent = "dpll4_m4_ck",
+	.div = 1,
+	.mult = 2,
+	.flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dpll4_m4x2_mul_ck = {
+	.name = "dpll4_m4x2_mul_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll4_m4x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m4x2_ck_data = {
+	.parent = "dpll4_m4x2_mul_ck",
+	.bit_shift = 0x1d,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m4x2_ck = {
+	.name = "dpll4_m4x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll4_m4x2_ck_data,
+};
+
+static struct ti_clk_gate dss1_alwon_fck_3430es2_data = {
+	.parent = "dpll4_m4x2_ck",
+	.bit_shift = 0,
+	.reg = 0xe00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_DSS | CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dss1_alwon_fck_3430es2 = {
+	.name = "dss1_alwon_fck",
+	.clkdm_name = "dss_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &dss1_alwon_fck_3430es2_data,
+};
+
+static struct ti_clk_gate uart3_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 11,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart3_ick = {
+	.name = "uart3_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &uart3_ick_data,
+};
+
+static struct ti_clk_divider dpll4_m3_ck_data = {
+	.parent = "dpll4_ck",
+	.bit_shift = 8,
+	.max_div = 32,
+	.reg = 0xe40,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m3_ck = {
+	.name = "dpll4_m3_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll4_m3_ck_data,
+};
+
+static struct ti_clk_gate mcbsp3_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 1,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp3_ick = {
+	.name = "mcbsp3_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcbsp3_ick_data,
+};
+
+static struct ti_clk_gate gpio3_dbck_data = {
+	.parent = "per_32k_alwon_fck",
+	.bit_shift = 14,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio3_dbck = {
+	.name = "gpio3_dbck",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio3_dbck_data,
+};
+
+static struct ti_clk_gate fac_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 8,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk fac_ick = {
+	.name = "fac_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &fac_ick_data,
+};
+
+static struct ti_clk_gate clkout2_src_gate_ck_data = {
+	.parent = "core_ck",
+	.bit_shift = 7,
+	.reg = 0xd70,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_NO_WAIT,
+};
+
+static struct ti_clk_fixed_factor dpll4_m3x2_mul_ck_data = {
+	.parent = "dpll4_m3_ck",
+	.div = 1,
+	.mult = 2,
+};
+
+static struct ti_clk dpll4_m3x2_mul_ck = {
+	.name = "dpll4_m3x2_mul_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll4_m3x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m3x2_ck_data = {
+	.parent = "dpll4_m3x2_mul_ck",
+	.bit_shift = 0x1c,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m3x2_ck = {
+	.name = "dpll4_m3x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll4_m3x2_ck_data,
+};
+
+static const char *omap_54m_fck_parents[] = {
+	"dpll4_m3x2_ck",
+	"sys_altclk",
+};
+
+static struct ti_clk_mux omap_54m_fck_data = {
+	.bit_shift = 5,
+	.num_parents = ARRAY_SIZE(omap_54m_fck_parents),
+	.reg = 0xd40,
+	.module = TI_CLKM_CM,
+	.parents = omap_54m_fck_parents,
+};
+
+static struct ti_clk omap_54m_fck = {
+	.name = "omap_54m_fck",
+	.type = TI_CLK_MUX,
+	.data = &omap_54m_fck_data,
+};
+
+static const char *clkout2_src_mux_ck_parents[] = {
+	"core_ck",
+	"sys_ck",
+	"cm_96m_fck",
+	"omap_54m_fck",
+};
+
+static struct ti_clk_mux clkout2_src_mux_ck_data = {
+	.num_parents = ARRAY_SIZE(clkout2_src_mux_ck_parents),
+	.reg = 0xd70,
+	.module = TI_CLKM_CM,
+	.parents = clkout2_src_mux_ck_parents,
+};
+
+static struct ti_clk_composite clkout2_src_ck_data = {
+	.mux = &clkout2_src_mux_ck_data,
+	.gate = &clkout2_src_gate_ck_data,
+};
+
+static struct ti_clk clkout2_src_ck = {
+	.name = "clkout2_src_ck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &clkout2_src_ck_data,
+};
+
+static struct ti_clk_gate i2c1_fck_data = {
+	.parent = "core_96m_fck",
+	.bit_shift = 15,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c1_fck = {
+	.name = "i2c1_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &i2c1_fck_data,
+};
+
+static struct ti_clk_gate wdt3_fck_data = {
+	.parent = "per_32k_alwon_fck",
+	.bit_shift = 12,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk wdt3_fck = {
+	.name = "wdt3_fck",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &wdt3_fck_data,
+};
+
+static struct ti_clk_gate gpt7_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 8,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static const char *gpt7_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt7_mux_fck_data = {
+	.bit_shift = 5,
+	.num_parents = ARRAY_SIZE(gpt7_mux_fck_parents),
+	.reg = 0x1040,
+	.module = TI_CLKM_CM,
+	.parents = gpt7_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt7_fck_data = {
+	.mux = &gpt7_mux_fck_data,
+	.gate = &gpt7_gate_fck_data,
+};
+
+static struct ti_clk gpt7_fck = {
+	.name = "gpt7_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt7_fck_data,
+};
+
+static struct ti_clk_gate usb_l4_gate_ick_data = {
+	.parent = "l4_ick",
+	.bit_shift = 5,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INTERFACE,
+};
+
+static struct ti_clk_divider usb_l4_div_ick_data = {
+	.parent = "l4_ick",
+	.bit_shift = 4,
+	.max_div = 1,
+	.reg = 0xa40,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk_composite usb_l4_ick_data = {
+	.gate = &usb_l4_gate_ick_data,
+	.divider = &usb_l4_div_ick_data,
+};
+
+static struct ti_clk usb_l4_ick = {
+	.name = "usb_l4_ick",
+	.type = TI_CLK_COMPOSITE,
+	.data = &usb_l4_ick_data,
+};
+
+static struct ti_clk_gate uart4_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 18,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart4_ick = {
+	.name = "uart4_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &uart4_ick_data,
+};
+
+static struct ti_clk_fixed dummy_ck_data = {
+	.frequency = 0,
+};
+
+static struct ti_clk dummy_ck = {
+	.name = "dummy_ck",
+	.type = TI_CLK_FIXED,
+	.data = &dummy_ck_data,
+};
+
+static const char *gpt3_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt3_mux_fck_data = {
+	.bit_shift = 1,
+	.num_parents = ARRAY_SIZE(gpt3_mux_fck_parents),
+	.reg = 0x1040,
+	.module = TI_CLKM_CM,
+	.parents = gpt3_mux_fck_parents,
+};
+
+static struct ti_clk_gate gpt9_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 10,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt9_ick = {
+	.name = "gpt9_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt9_ick_data,
+};
+
+static struct ti_clk_gate gpt10_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 11,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate dss_ick_3430es1_data = {
+	.parent = "l4_ick",
+	.bit_shift = 0,
+	.reg = 0xe10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk dss_ick_3430es1 = {
+	.name = "dss_ick",
+	.clkdm_name = "dss_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &dss_ick_3430es1_data,
+};
+
+static struct ti_clk_gate gpt11_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 12,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt11_ick = {
+	.name = "gpt11_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt11_ick_data,
+};
+
+static struct ti_clk_divider dpll2_fck_data = {
+	.parent = "core_ck",
+	.bit_shift = 19,
+	.max_div = 7,
+	.reg = 0x40,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll2_fck = {
+	.name = "dpll2_fck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll2_fck_data,
+};
+
+static struct ti_clk_gate uart1_fck_data = {
+	.parent = "core_48m_fck",
+	.bit_shift = 13,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart1_fck = {
+	.name = "uart1_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &uart1_fck_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_3430es1_data = {
+	.parent = "core_l3_ick",
+	.bit_shift = 4,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk hsotgusb_ick_3430es1 = {
+	.name = "hsotgusb_ick_3430es1",
+	.clkdm_name = "core_l3_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &hsotgusb_ick_3430es1_data,
+};
+
+static struct ti_clk_gate gpio2_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 13,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio2_ick = {
+	.name = "gpio2_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio2_ick_data,
+};
+
+static struct ti_clk_gate mmchs1_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 24,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs1_ick = {
+	.name = "mmchs1_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mmchs1_ick_data,
+};
+
+static struct ti_clk_gate modem_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 31,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk modem_fck = {
+	.name = "modem_fck",
+	.clkdm_name = "d2d_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &modem_fck_data,
+};
+
+static struct ti_clk_gate mcbsp4_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 2,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp4_ick = {
+	.name = "mcbsp4_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcbsp4_ick_data,
+};
+
+static struct ti_clk_gate gpio1_ick_data = {
+	.parent = "wkup_l4_ick",
+	.bit_shift = 3,
+	.reg = 0xc10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio1_ick = {
+	.name = "gpio1_ick",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio1_ick_data,
+};
+
+static const char *gpt6_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt6_mux_fck_data = {
+	.bit_shift = 4,
+	.num_parents = ARRAY_SIZE(gpt6_mux_fck_parents),
+	.reg = 0x1040,
+	.module = TI_CLKM_CM,
+	.parents = gpt6_mux_fck_parents,
+};
+
+static struct ti_clk_fixed_factor dpll1_x2_ck_data = {
+	.parent = "dpll1_ck",
+	.div = 1,
+	.mult = 2,
+};
+
+static struct ti_clk dpll1_x2_ck = {
+	.name = "dpll1_x2_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll1_x2_ck_data,
+};
+
+static struct ti_clk_divider dpll1_x2m2_ck_data = {
+	.parent = "dpll1_x2_ck",
+	.max_div = 31,
+	.reg = 0x944,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll1_x2m2_ck = {
+	.name = "dpll1_x2m2_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll1_x2m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor mpu_ck_data = {
+	.parent = "dpll1_x2m2_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk mpu_ck = {
+	.name = "mpu_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &mpu_ck_data,
+};
+
+static struct ti_clk_divider arm_fck_data = {
+	.parent = "mpu_ck",
+	.max_div = 2,
+	.reg = 0x924,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk arm_fck = {
+	.name = "arm_fck",
+	.type = TI_CLK_DIVIDER,
+	.data = &arm_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d3_ck_data = {
+	.parent = "core_ck",
+	.div = 3,
+	.mult = 1,
+};
+
+static struct ti_clk core_d3_ck = {
+	.name = "core_d3_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_d3_ck_data,
+};
+
+static struct ti_clk_gate gpt11_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 12,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+};
+
+static const char *gpt11_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt11_mux_fck_data = {
+	.bit_shift = 7,
+	.num_parents = ARRAY_SIZE(gpt11_mux_fck_parents),
+	.reg = 0xa40,
+	.module = TI_CLKM_CM,
+	.parents = gpt11_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt11_fck_data = {
+	.mux = &gpt11_mux_fck_data,
+	.gate = &gpt11_gate_fck_data,
+};
+
+static struct ti_clk gpt11_fck = {
+	.name = "gpt11_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt11_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d6_ck_data = {
+	.parent = "core_ck",
+	.div = 6,
+	.mult = 1,
+};
+
+static struct ti_clk core_d6_ck = {
+	.name = "core_d6_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_d6_ck_data,
+};
+
+static struct ti_clk_gate uart4_fck_am35xx_data = {
+	.parent = "core_48m_fck",
+	.bit_shift = 23,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart4_fck_am35xx = {
+	.name = "uart4_fck_am35xx",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &uart4_fck_am35xx_data,
+};
+
+static struct ti_clk_gate dpll3_m3x2_ck_data = {
+	.parent = "dpll3_m3x2_mul_ck",
+	.bit_shift = 0xc,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll3_m3x2_ck = {
+	.name = "dpll3_m3x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll3_m3x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_core_alwon_ck_data = {
+	.parent = "dpll3_m3x2_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk emu_core_alwon_ck = {
+	.name = "emu_core_alwon_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &emu_core_alwon_ck_data,
+};
+
+static struct ti_clk_divider dpll4_m6_ck_data = {
+	.parent = "dpll4_ck",
+	.bit_shift = 24,
+	.max_div = 63,
+	.reg = 0x1140,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m6_ck = {
+	.name = "dpll4_m6_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll4_m6_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m6x2_mul_ck_data = {
+	.parent = "dpll4_m6_ck",
+	.div = 1,
+	.mult = 2,
+};
+
+static struct ti_clk dpll4_m6x2_mul_ck = {
+	.name = "dpll4_m6x2_mul_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll4_m6x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m6x2_ck_data = {
+	.parent = "dpll4_m6x2_mul_ck",
+	.bit_shift = 0x1f,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m6x2_ck = {
+	.name = "dpll4_m6x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll4_m6x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_per_alwon_ck_data = {
+	.parent = "dpll4_m6x2_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk emu_per_alwon_ck = {
+	.name = "emu_per_alwon_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &emu_per_alwon_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_mpu_alwon_ck_data = {
+	.parent = "mpu_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk emu_mpu_alwon_ck = {
+	.name = "emu_mpu_alwon_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &emu_mpu_alwon_ck_data,
+};
+
+static const char *emu_src_mux_ck_parents[] = {
+	"sys_ck",
+	"emu_core_alwon_ck",
+	"emu_per_alwon_ck",
+	"emu_mpu_alwon_ck",
+};
+
+static struct ti_clk_mux emu_src_mux_ck_data = {
+	.num_parents = ARRAY_SIZE(emu_src_mux_ck_parents),
+	.reg = 0x1140,
+	.module = TI_CLKM_CM,
+	.parents = emu_src_mux_ck_parents,
+};
+
+static struct ti_clk emu_src_mux_ck = {
+	.name = "emu_src_mux_ck",
+	.type = TI_CLK_MUX,
+	.data = &emu_src_mux_ck_data,
+};
+
+static struct ti_clk_gate emu_src_ck_data = {
+	.parent = "emu_src_mux_ck",
+	.flags = CLKF_CLKDM,
+};
+
+static struct ti_clk emu_src_ck = {
+	.name = "emu_src_ck",
+	.clkdm_name = "emu_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &emu_src_ck_data,
+};
+
+static struct ti_clk_divider atclk_fck_data = {
+	.parent = "emu_src_ck",
+	.bit_shift = 4,
+	.max_div = 3,
+	.reg = 0x1140,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk atclk_fck = {
+	.name = "atclk_fck",
+	.type = TI_CLK_DIVIDER,
+	.data = &atclk_fck_data,
+};
+
+static struct ti_clk_gate ipss_ick_data = {
+	.parent = "core_l3_ick",
+	.bit_shift = 4,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_AM35XX | CLKF_INTERFACE,
+};
+
+static struct ti_clk ipss_ick = {
+	.name = "ipss_ick",
+	.clkdm_name = "core_l3_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &ipss_ick_data,
+};
+
+static struct ti_clk_gate emac_ick_data = {
+	.parent = "ipss_ick",
+	.bit_shift = 1,
+	.reg = 0x59c,
+	.module = TI_CLKM_SCRM,
+	.flags = CLKF_AM35XX,
+};
+
+static struct ti_clk emac_ick = {
+	.name = "emac_ick",
+	.clkdm_name = "core_l3_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &emac_ick_data,
+};
+
+static struct ti_clk_gate vpfe_ick_data = {
+	.parent = "ipss_ick",
+	.bit_shift = 2,
+	.reg = 0x59c,
+	.module = TI_CLKM_SCRM,
+	.flags = CLKF_AM35XX,
+};
+
+static struct ti_clk vpfe_ick = {
+	.name = "vpfe_ick",
+	.clkdm_name = "core_l3_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &vpfe_ick_data,
+};
+
+static const char *dpll2_ck_parents[] = {
+	"sys_ck",
+	"dpll2_fck",
+};
+
+static struct ti_clk_dpll dpll2_ck_data = {
+	.num_parents = ARRAY_SIZE(dpll2_ck_parents),
+	.control_reg = 0x4,
+	.idlest_reg = 0x24,
+	.mult_div1_reg = 0x40,
+	.autoidle_reg = 0x34,
+	.module = TI_CLKM_CM,
+	.parents = dpll2_ck_parents,
+	.freqsel_mask = 0xf0,
+	.modes = 0xa2,
+	.div1_mask = 0x7f,
+	.idlest_mask = 0x1,
+	.auto_recal_bit = 0x3,
+	.max_divider = 0x80,
+	.min_divider = 0x1,
+	.recal_en_bit = 0x8,
+	.max_multiplier = 0x7ff,
+	.enable_mask = 0x7,
+	.mult_mask = 0x7ff00,
+	.recal_st_bit = 0x8,
+	.autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll2_ck = {
+	.name = "dpll2_ck",
+	.clkdm_name = "dpll2_clkdm",
+	.type = TI_CLK_DPLL,
+	.data = &dpll2_ck_data,
+};
+
+static struct ti_clk_divider dpll2_m2_ck_data = {
+	.parent = "dpll2_ck",
+	.max_div = 31,
+	.reg = 0x44,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll2_m2_ck = {
+	.name = "dpll2_m2_ck",
+	.type = TI_CLK_DIVIDER,
+	.data = &dpll2_m2_ck_data,
+};
+
+static const char *mcbsp4_mux_fck_parents[] = {
+	"per_96m_fck",
+	"mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp4_mux_fck_data = {
+	.bit_shift = 2,
+	.num_parents = ARRAY_SIZE(mcbsp4_mux_fck_parents),
+	.reg = 0x2d8,
+	.module = TI_CLKM_SCRM,
+	.parents = mcbsp4_mux_fck_parents,
+};
+
+static const char *mcbsp1_mux_fck_parents[] = {
+	"core_96m_fck",
+	"mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp1_mux_fck_data = {
+	.bit_shift = 2,
+	.num_parents = ARRAY_SIZE(mcbsp1_mux_fck_parents),
+	.reg = 0x274,
+	.module = TI_CLKM_SCRM,
+	.parents = mcbsp1_mux_fck_parents,
+};
+
+static struct ti_clk_gate gpt8_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 9,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate gpt8_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 9,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt8_ick = {
+	.name = "gpt8_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt8_ick_data,
+};
+
+static const char *gpt10_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt10_mux_fck_data = {
+	.bit_shift = 6,
+	.num_parents = ARRAY_SIZE(gpt10_mux_fck_parents),
+	.reg = 0xa40,
+	.module = TI_CLKM_CM,
+	.parents = gpt10_mux_fck_parents,
+};
+
+static struct ti_clk_gate mmchs3_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 30,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs3_ick = {
+	.name = "mmchs3_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mmchs3_ick_data,
+};
+
+static struct ti_clk_gate gpio3_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 14,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio3_ick = {
+	.name = "gpio3_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio3_ick_data,
+};
+
+static const char *traceclk_src_fck_parents[] = {
+	"sys_ck",
+	"emu_core_alwon_ck",
+	"emu_per_alwon_ck",
+	"emu_mpu_alwon_ck",
+};
+
+static struct ti_clk_mux traceclk_src_fck_data = {
+	.bit_shift = 2,
+	.num_parents = ARRAY_SIZE(traceclk_src_fck_parents),
+	.reg = 0x1140,
+	.module = TI_CLKM_CM,
+	.parents = traceclk_src_fck_parents,
+};
+
+static struct ti_clk traceclk_src_fck = {
+	.name = "traceclk_src_fck",
+	.type = TI_CLK_MUX,
+	.data = &traceclk_src_fck_data,
+};
+
+static struct ti_clk_divider traceclk_fck_data = {
+	.parent = "traceclk_src_fck",
+	.bit_shift = 11,
+	.max_div = 7,
+	.reg = 0x1140,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk traceclk_fck = {
+	.name = "traceclk_fck",
+	.type = TI_CLK_DIVIDER,
+	.data = &traceclk_fck_data,
+};
+
+static struct ti_clk_gate mcbsp5_gate_fck_data = {
+	.parent = "mcbsp_clks",
+	.bit_shift = 10,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate sad2d_ick_data = {
+	.parent = "l3_ick",
+	.bit_shift = 3,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sad2d_ick = {
+	.name = "sad2d_ick",
+	.clkdm_name = "d2d_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &sad2d_ick_data,
+};
+
+static const char *gpt1_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt1_mux_fck_data = {
+	.num_parents = ARRAY_SIZE(gpt1_mux_fck_parents),
+	.reg = 0xc40,
+	.module = TI_CLKM_CM,
+	.parents = gpt1_mux_fck_parents,
+};
+
+static struct ti_clk_gate hecc_ck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 3,
+	.reg = 0x59c,
+	.module = TI_CLKM_SCRM,
+	.flags = CLKF_AM35XX,
+};
+
+static struct ti_clk hecc_ck = {
+	.name = "hecc_ck",
+	.clkdm_name = "core_l3_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &hecc_ck_data,
+};
+
+static struct ti_clk_gate gpt1_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 0,
+	.reg = 0xc00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite gpt1_fck_data = {
+	.mux = &gpt1_mux_fck_data,
+	.gate = &gpt1_gate_fck_data,
+};
+
+static struct ti_clk gpt1_fck = {
+	.name = "gpt1_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt1_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m2x2_ck_omap36xx_data = {
+	.parent = "dpll4_m2x2_mul_ck",
+	.bit_shift = 0x1b,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m2x2_ck_omap36xx = {
+	.name = "dpll4_m2x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll4_m2x2_ck_omap36xx_data,
+	.patch = &dpll4_m2x2_ck,
+};
+
+static struct ti_clk_divider gfx_l3_fck_data = {
+	.parent = "l3_ick",
+	.max_div = 7,
+	.reg = 0xb40,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk gfx_l3_fck = {
+	.name = "gfx_l3_fck",
+	.type = TI_CLK_DIVIDER,
+	.data = &gfx_l3_fck_data,
+};
+
+static struct ti_clk_gate gfx_cg1_ck_data = {
+	.parent = "gfx_l3_fck",
+	.bit_shift = 1,
+	.reg = 0xb00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_cg1_ck = {
+	.name = "gfx_cg1_ck",
+	.clkdm_name = "gfx_3430es1_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gfx_cg1_ck_data,
+};
+
+static struct ti_clk_gate mailboxes_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 7,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mailboxes_ick = {
+	.name = "mailboxes_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mailboxes_ick_data,
+};
+
+static struct ti_clk_gate sha11_ick_data = {
+	.parent = "security_l4_ick2",
+	.bit_shift = 1,
+	.reg = 0xa14,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sha11_ick = {
+	.name = "sha11_ick",
+	.type = TI_CLK_GATE,
+	.data = &sha11_ick_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_am35xx_data = {
+	.parent = "ipss_ick",
+	.bit_shift = 0,
+	.reg = 0x59c,
+	.module = TI_CLKM_SCRM,
+	.flags = CLKF_AM35XX,
+};
+
+static struct ti_clk hsotgusb_ick_am35xx = {
+	.name = "hsotgusb_ick_am35xx",
+	.clkdm_name = "core_l3_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &hsotgusb_ick_am35xx_data,
+};
+
+static struct ti_clk_gate mmchs3_fck_data = {
+	.parent = "core_96m_fck",
+	.bit_shift = 30,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs3_fck = {
+	.name = "mmchs3_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mmchs3_fck_data,
+};
+
+static struct ti_clk_divider pclk_fck_data = {
+	.parent = "emu_src_ck",
+	.bit_shift = 8,
+	.max_div = 7,
+	.reg = 0x1140,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk pclk_fck = {
+	.name = "pclk_fck",
+	.type = TI_CLK_DIVIDER,
+	.data = &pclk_fck_data,
+};
+
+static const char *dpll4_ck_omap36xx_parents[] = {
+	"sys_ck",
+	"sys_ck",
+};
+
+static struct ti_clk_dpll dpll4_ck_omap36xx_data = {
+	.num_parents = ARRAY_SIZE(dpll4_ck_omap36xx_parents),
+	.control_reg = 0xd00,
+	.idlest_reg = 0xd20,
+	.mult_div1_reg = 0xd44,
+	.autoidle_reg = 0xd30,
+	.module = TI_CLKM_CM,
+	.parents = dpll4_ck_omap36xx_parents,
+	.modes = 0x82,
+	.div1_mask = 0x7f,
+	.idlest_mask = 0x2,
+	.auto_recal_bit = 0x13,
+	.max_divider = 0x80,
+	.min_divider = 0x1,
+	.recal_en_bit = 0x6,
+	.max_multiplier = 0xfff,
+	.enable_mask = 0x70000,
+	.mult_mask = 0xfff00,
+	.recal_st_bit = 0x6,
+	.autoidle_mask = 0x38,
+	.sddiv_mask = 0xff000000,
+	.dco_mask = 0xe00000,
+	.flags = CLKF_PER | CLKF_J_TYPE,
+};
+
+static struct ti_clk dpll4_ck_omap36xx = {
+	.name = "dpll4_ck",
+	.type = TI_CLK_DPLL,
+	.data = &dpll4_ck_omap36xx_data,
+	.patch = &dpll4_ck,
+};
+
+static struct ti_clk_gate uart3_fck_data = {
+	.parent = "per_48m_fck",
+	.bit_shift = 11,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart3_fck = {
+	.name = "uart3_fck",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &uart3_fck_data,
+};
+
+static struct ti_clk_fixed_factor wkup_32k_fck_data = {
+	.parent = "omap_32k_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk wkup_32k_fck = {
+	.name = "wkup_32k_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &wkup_32k_fck_data,
+};
+
+static struct ti_clk_gate sys_clkout1_data = {
+	.parent = "osc_sys_ck",
+	.bit_shift = 7,
+	.reg = 0xd70,
+	.module = TI_CLKM_PRM,
+};
+
+static struct ti_clk sys_clkout1 = {
+	.name = "sys_clkout1",
+	.type = TI_CLK_GATE,
+	.data = &sys_clkout1_data,
+};
+
+static struct ti_clk_fixed_factor gpmc_fck_data = {
+	.parent = "core_l3_ick",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk gpmc_fck = {
+	.name = "gpmc_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &gpmc_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d20_ck_data = {
+	.parent = "dpll5_m2_ck",
+	.div = 20,
+	.mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d20_ck = {
+	.name = "dpll5_m2_d20_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll5_m2_d20_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m5x2_ck_omap36xx_data = {
+	.parent = "dpll4_m5x2_mul_ck",
+	.bit_shift = 0x1e,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_HSDIV | CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m5x2_ck_omap36xx = {
+	.name = "dpll4_m5x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll4_m5x2_ck_omap36xx_data,
+	.patch = &dpll4_m5x2_ck,
+};
+
+static struct ti_clk_gate ssi_ssr_gate_fck_3430es2_data = {
+	.parent = "corex2_fck",
+	.bit_shift = 0,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_NO_WAIT,
+};
+
+static struct ti_clk_gate uart1_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 13,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart1_ick = {
+	.name = "uart1_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &uart1_ick_data,
+};
+
+static struct ti_clk_gate iva2_ck_data = {
+	.parent = "dpll2_m2_ck",
+	.bit_shift = 0,
+	.reg = 0x0,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk iva2_ck = {
+	.name = "iva2_ck",
+	.clkdm_name = "iva2_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &iva2_ck_data,
+};
+
+static struct ti_clk_gate pka_ick_data = {
+	.parent = "security_l3_ick",
+	.bit_shift = 4,
+	.reg = 0xa14,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk pka_ick = {
+	.name = "pka_ick",
+	.type = TI_CLK_GATE,
+	.data = &pka_ick_data,
+};
+
+static struct ti_clk_gate gpt12_ick_data = {
+	.parent = "wkup_l4_ick",
+	.bit_shift = 1,
+	.reg = 0xc10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt12_ick = {
+	.name = "gpt12_ick",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt12_ick_data,
+};
+
+static const char *mcbsp5_mux_fck_parents[] = {
+	"core_96m_fck",
+	"mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp5_mux_fck_data = {
+	.bit_shift = 4,
+	.num_parents = ARRAY_SIZE(mcbsp5_mux_fck_parents),
+	.reg = 0x2d8,
+	.module = TI_CLKM_SCRM,
+	.parents = mcbsp5_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp5_fck_data = {
+	.mux = &mcbsp5_mux_fck_data,
+	.gate = &mcbsp5_gate_fck_data,
+};
+
+static struct ti_clk mcbsp5_fck = {
+	.name = "mcbsp5_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &mcbsp5_fck_data,
+};
+
+static struct ti_clk_gate usbhost_48m_fck_data = {
+	.parent = "omap_48m_fck",
+	.bit_shift = 0,
+	.reg = 0x1400,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_DSS,
+};
+
+static struct ti_clk usbhost_48m_fck = {
+	.name = "usbhost_48m_fck",
+	.clkdm_name = "usbhost_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &usbhost_48m_fck_data,
+};
+
+static struct ti_clk_gate des1_ick_data = {
+	.parent = "security_l4_ick2",
+	.bit_shift = 0,
+	.reg = 0xa14,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk des1_ick = {
+	.name = "des1_ick",
+	.type = TI_CLK_GATE,
+	.data = &des1_ick_data,
+};
+
+static struct ti_clk_gate sgx_gate_fck_data = {
+	.parent = "core_ck",
+	.bit_shift = 1,
+	.reg = 0xb00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor core_d4_ck_data = {
+	.parent = "core_ck",
+	.div = 4,
+	.mult = 1,
+};
+
+static struct ti_clk core_d4_ck = {
+	.name = "core_d4_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_d4_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_192m_alwon_fck_data = {
+	.parent = "dpll4_m2x2_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk omap_192m_alwon_fck = {
+	.name = "omap_192m_alwon_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &omap_192m_alwon_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d2_ck_data = {
+	.parent = "core_ck",
+	.div = 2,
+	.mult = 1,
+};
+
+static struct ti_clk core_d2_ck = {
+	.name = "core_d2_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_d2_ck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_d3_fck_data = {
+	.parent = "corex2_fck",
+	.div = 3,
+	.mult = 1,
+};
+
+static struct ti_clk corex2_d3_fck = {
+	.name = "corex2_d3_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &corex2_d3_fck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_d5_fck_data = {
+	.parent = "corex2_fck",
+	.div = 5,
+	.mult = 1,
+};
+
+static struct ti_clk corex2_d5_fck = {
+	.name = "corex2_d5_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &corex2_d5_fck_data,
+};
+
+static const char *sgx_mux_fck_parents[] = {
+	"core_d3_ck",
+	"core_d4_ck",
+	"core_d6_ck",
+	"cm_96m_fck",
+	"omap_192m_alwon_fck",
+	"core_d2_ck",
+	"corex2_d3_fck",
+	"corex2_d5_fck",
+};
+
+static struct ti_clk_mux sgx_mux_fck_data = {
+	.num_parents = ARRAY_SIZE(sgx_mux_fck_parents),
+	.reg = 0xb40,
+	.module = TI_CLKM_CM,
+	.parents = sgx_mux_fck_parents,
+};
+
+static struct ti_clk_composite sgx_fck_data = {
+	.mux = &sgx_mux_fck_data,
+	.gate = &sgx_gate_fck_data,
+};
+
+static struct ti_clk sgx_fck = {
+	.name = "sgx_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &sgx_fck_data,
+};
+
+static struct ti_clk_gate mcspi1_fck_data = {
+	.parent = "core_48m_fck",
+	.bit_shift = 18,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi1_fck = {
+	.name = "mcspi1_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcspi1_fck_data,
+};
+
+static struct ti_clk_gate mmchs2_fck_data = {
+	.parent = "core_96m_fck",
+	.bit_shift = 25,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs2_fck = {
+	.name = "mmchs2_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mmchs2_fck_data,
+};
+
+static struct ti_clk_gate mcspi2_fck_data = {
+	.parent = "core_48m_fck",
+	.bit_shift = 19,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi2_fck = {
+	.name = "mcspi2_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcspi2_fck_data,
+};
+
+static struct ti_clk_gate vpfe_fck_data = {
+	.parent = "pclk_ck",
+	.bit_shift = 10,
+	.reg = 0x59c,
+	.module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk vpfe_fck = {
+	.name = "vpfe_fck",
+	.type = TI_CLK_GATE,
+	.data = &vpfe_fck_data,
+};
+
+static struct ti_clk_gate gpt4_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 5,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate mcbsp1_gate_fck_data = {
+	.parent = "mcbsp_clks",
+	.bit_shift = 9,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate gpt5_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 6,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static const char *gpt5_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt5_mux_fck_data = {
+	.bit_shift = 3,
+	.num_parents = ARRAY_SIZE(gpt5_mux_fck_parents),
+	.reg = 0x1040,
+	.module = TI_CLKM_CM,
+	.parents = gpt5_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt5_fck_data = {
+	.mux = &gpt5_mux_fck_data,
+	.gate = &gpt5_gate_fck_data,
+};
+
+static struct ti_clk gpt5_fck = {
+	.name = "gpt5_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt5_fck_data,
+};
+
+static struct ti_clk_gate ts_fck_data = {
+	.parent = "omap_32k_fck",
+	.bit_shift = 1,
+	.reg = 0xa08,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk ts_fck = {
+	.name = "ts_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &ts_fck_data,
+};
+
+static struct ti_clk_fixed_factor wdt1_fck_data = {
+	.parent = "secure_32k_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk wdt1_fck = {
+	.name = "wdt1_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &wdt1_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m6x2_ck_omap36xx_data = {
+	.parent = "dpll4_m6x2_mul_ck",
+	.bit_shift = 0x1f,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m6x2_ck_omap36xx = {
+	.name = "dpll4_m6x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll4_m6x2_ck_omap36xx_data,
+	.patch = &dpll4_m6x2_ck,
+};
+
+static const char *gpt4_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt4_mux_fck_data = {
+	.bit_shift = 2,
+	.num_parents = ARRAY_SIZE(gpt4_mux_fck_parents),
+	.reg = 0x1040,
+	.module = TI_CLKM_CM,
+	.parents = gpt4_mux_fck_parents,
+};
+
+static struct ti_clk_gate usbhost_ick_data = {
+	.parent = "l4_ick",
+	.bit_shift = 0,
+	.reg = 0x1410,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usbhost_ick = {
+	.name = "usbhost_ick",
+	.clkdm_name = "usbhost_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &usbhost_ick_data,
+};
+
+static struct ti_clk_gate mcbsp2_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 0,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp2_ick = {
+	.name = "mcbsp2_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcbsp2_ick_data,
+};
+
+static struct ti_clk_gate omapctrl_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 6,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk omapctrl_ick = {
+	.name = "omapctrl_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &omapctrl_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d4_fck_data = {
+	.parent = "omap_96m_fck",
+	.div = 4,
+	.mult = 1,
+};
+
+static struct ti_clk omap_96m_d4_fck = {
+	.name = "omap_96m_d4_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &omap_96m_d4_fck_data,
+};
+
+static struct ti_clk_gate gpt6_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 7,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt6_ick = {
+	.name = "gpt6_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt6_ick_data,
+};
+
+static struct ti_clk_gate dpll3_m3x2_ck_omap36xx_data = {
+	.parent = "dpll3_m3x2_mul_ck",
+	.bit_shift = 0xc,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll3_m3x2_ck_omap36xx = {
+	.name = "dpll3_m3x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll3_m3x2_ck_omap36xx_data,
+	.patch = &dpll3_m3x2_ck,
+};
+
+static struct ti_clk_gate i2c3_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 17,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c3_ick = {
+	.name = "i2c3_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &i2c3_ick_data,
+};
+
+static struct ti_clk_gate gpio6_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 17,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio6_ick = {
+	.name = "gpio6_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio6_ick_data,
+};
+
+static struct ti_clk_gate mspro_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 23,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mspro_ick = {
+	.name = "mspro_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mspro_ick_data,
+};
+
+static struct ti_clk_composite mcbsp1_fck_data = {
+	.mux = &mcbsp1_mux_fck_data,
+	.gate = &mcbsp1_gate_fck_data,
+};
+
+static struct ti_clk mcbsp1_fck = {
+	.name = "mcbsp1_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &mcbsp1_fck_data,
+};
+
+static struct ti_clk_gate gpt3_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 4,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed rmii_ck_data = {
+	.frequency = 50000000,
+};
+
+static struct ti_clk rmii_ck = {
+	.name = "rmii_ck",
+	.type = TI_CLK_FIXED,
+	.data = &rmii_ck_data,
+};
+
+static struct ti_clk_gate gpt6_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 7,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite gpt6_fck_data = {
+	.mux = &gpt6_mux_fck_data,
+	.gate = &gpt6_gate_fck_data,
+};
+
+static struct ti_clk gpt6_fck = {
+	.name = "gpt6_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt6_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d4_ck_data = {
+	.parent = "dpll5_m2_ck",
+	.div = 4,
+	.mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d4_ck = {
+	.name = "dpll5_m2_d4_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll5_m2_d4_ck_data,
+};
+
+static struct ti_clk_fixed_factor sys_d2_ck_data = {
+	.parent = "sys_ck",
+	.div = 2,
+	.mult = 1,
+};
+
+static struct ti_clk sys_d2_ck = {
+	.name = "sys_d2_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &sys_d2_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d2_fck_data = {
+	.parent = "omap_96m_fck",
+	.div = 2,
+	.mult = 1,
+};
+
+static struct ti_clk omap_96m_d2_fck = {
+	.name = "omap_96m_d2_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &omap_96m_d2_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d8_ck_data = {
+	.parent = "dpll5_m2_ck",
+	.div = 8,
+	.mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d8_ck = {
+	.name = "dpll5_m2_d8_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll5_m2_d8_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d16_ck_data = {
+	.parent = "dpll5_m2_ck",
+	.div = 16,
+	.mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d16_ck = {
+	.name = "dpll5_m2_d16_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll5_m2_d16_ck_data,
+};
+
+static const char *usim_mux_fck_parents[] = {
+	"sys_ck",
+	"sys_d2_ck",
+	"omap_96m_d2_fck",
+	"omap_96m_d4_fck",
+	"omap_96m_d8_fck",
+	"omap_96m_d10_fck",
+	"dpll5_m2_d4_ck",
+	"dpll5_m2_d8_ck",
+	"dpll5_m2_d16_ck",
+	"dpll5_m2_d20_ck",
+};
+
+static struct ti_clk_mux usim_mux_fck_data = {
+	.bit_shift = 3,
+	.num_parents = ARRAY_SIZE(usim_mux_fck_parents),
+	.reg = 0xc40,
+	.module = TI_CLKM_CM,
+	.parents = usim_mux_fck_parents,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk_composite usim_fck_data = {
+	.mux = &usim_mux_fck_data,
+	.gate = &usim_gate_fck_data,
+};
+
+static struct ti_clk usim_fck = {
+	.name = "usim_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &usim_fck_data,
+};
+
+static int ssi_ssr_div_fck_3430es2_divs[] = {
+	0,
+	1,
+	2,
+	3,
+	4,
+	0,
+	6,
+	0,
+	8,
+};
+
+static struct ti_clk_divider ssi_ssr_div_fck_3430es2_data = {
+	.num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es2_divs),
+	.parent = "corex2_fck",
+	.bit_shift = 8,
+	.dividers = ssi_ssr_div_fck_3430es2_divs,
+	.reg = 0xa40,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite ssi_ssr_fck_3430es2_data = {
+	.gate = &ssi_ssr_gate_fck_3430es2_data,
+	.divider = &ssi_ssr_div_fck_3430es2_data,
+};
+
+static struct ti_clk ssi_ssr_fck_3430es2 = {
+	.name = "ssi_ssr_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &ssi_ssr_fck_3430es2_data,
+};
+
+static struct ti_clk_gate dss1_alwon_fck_3430es1_data = {
+	.parent = "dpll4_m4x2_ck",
+	.bit_shift = 0,
+	.reg = 0xe00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dss1_alwon_fck_3430es1 = {
+	.name = "dss1_alwon_fck",
+	.clkdm_name = "dss_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &dss1_alwon_fck_3430es1_data,
+};
+
+static struct ti_clk_gate gpt3_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 4,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt3_ick = {
+	.name = "gpt3_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt3_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_12m_fck_data = {
+	.parent = "omap_48m_fck",
+	.div = 4,
+	.mult = 1,
+};
+
+static struct ti_clk omap_12m_fck = {
+	.name = "omap_12m_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &omap_12m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_12m_fck_data = {
+	.parent = "omap_12m_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk core_12m_fck = {
+	.name = "core_12m_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_12m_fck_data,
+};
+
+static struct ti_clk_gate hdq_fck_data = {
+	.parent = "core_12m_fck",
+	.bit_shift = 22,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk hdq_fck = {
+	.name = "hdq_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &hdq_fck_data,
+};
+
+static struct ti_clk_gate usbtll_fck_data = {
+	.parent = "dpll5_m2_ck",
+	.bit_shift = 2,
+	.reg = 0xa08,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk usbtll_fck = {
+	.name = "usbtll_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &usbtll_fck_data,
+};
+
+static struct ti_clk_gate hsotgusb_fck_am35xx_data = {
+	.parent = "sys_ck",
+	.bit_shift = 8,
+	.reg = 0x59c,
+	.module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk hsotgusb_fck_am35xx = {
+	.name = "hsotgusb_fck_am35xx",
+	.clkdm_name = "core_l3_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &hsotgusb_fck_am35xx_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_3430es2_data = {
+	.parent = "core_l3_ick",
+	.bit_shift = 4,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_HSOTGUSB | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk hsotgusb_ick_3430es2 = {
+	.name = "hsotgusb_ick_3430es2",
+	.clkdm_name = "core_l3_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &hsotgusb_ick_3430es2_data,
+};
+
+static struct ti_clk_gate gfx_l3_ck_data = {
+	.parent = "l3_ick",
+	.bit_shift = 0,
+	.reg = 0xb10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_l3_ck = {
+	.name = "gfx_l3_ck",
+	.clkdm_name = "gfx_3430es1_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gfx_l3_ck_data,
+};
+
+static struct ti_clk_fixed_factor gfx_l3_ick_data = {
+	.parent = "gfx_l3_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk gfx_l3_ick = {
+	.name = "gfx_l3_ick",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &gfx_l3_ick_data,
+};
+
+static struct ti_clk_gate mcbsp1_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 9,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp1_ick = {
+	.name = "mcbsp1_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcbsp1_ick_data,
+};
+
+static struct ti_clk_fixed_factor gpt12_fck_data = {
+	.parent = "secure_32k_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk gpt12_fck = {
+	.name = "gpt12_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &gpt12_fck_data,
+};
+
+static struct ti_clk_gate gfx_cg2_ck_data = {
+	.parent = "gfx_l3_fck",
+	.bit_shift = 2,
+	.reg = 0xb00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_cg2_ck = {
+	.name = "gfx_cg2_ck",
+	.clkdm_name = "gfx_3430es1_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gfx_cg2_ck_data,
+};
+
+static struct ti_clk_gate i2c2_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 16,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c2_ick = {
+	.name = "i2c2_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &i2c2_ick_data,
+};
+
+static struct ti_clk_gate gpio4_dbck_data = {
+	.parent = "per_32k_alwon_fck",
+	.bit_shift = 15,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio4_dbck = {
+	.name = "gpio4_dbck",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio4_dbck_data,
+};
+
+static struct ti_clk_gate i2c3_fck_data = {
+	.parent = "core_96m_fck",
+	.bit_shift = 17,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c3_fck = {
+	.name = "i2c3_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &i2c3_fck_data,
+};
+
+static struct ti_clk_composite gpt3_fck_data = {
+	.mux = &gpt3_mux_fck_data,
+	.gate = &gpt3_gate_fck_data,
+};
+
+static struct ti_clk gpt3_fck = {
+	.name = "gpt3_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt3_fck_data,
+};
+
+static struct ti_clk_gate i2c1_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 15,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c1_ick = {
+	.name = "i2c1_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &i2c1_ick_data,
+};
+
+static struct ti_clk_gate omap_32ksync_ick_data = {
+	.parent = "wkup_l4_ick",
+	.bit_shift = 2,
+	.reg = 0xc10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk omap_32ksync_ick = {
+	.name = "omap_32ksync_ick",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &omap_32ksync_ick_data,
+};
+
+static struct ti_clk_gate aes2_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 28,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk aes2_ick = {
+	.name = "aes2_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &aes2_ick_data,
+};
+
+static const char *gpt8_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt8_mux_fck_data = {
+	.bit_shift = 6,
+	.num_parents = ARRAY_SIZE(gpt8_mux_fck_parents),
+	.reg = 0x1040,
+	.module = TI_CLKM_CM,
+	.parents = gpt8_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt8_fck_data = {
+	.mux = &gpt8_mux_fck_data,
+	.gate = &gpt8_gate_fck_data,
+};
+
+static struct ti_clk gpt8_fck = {
+	.name = "gpt8_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt8_fck_data,
+};
+
+static struct ti_clk_gate mcbsp4_gate_fck_data = {
+	.parent = "mcbsp_clks",
+	.bit_shift = 2,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite mcbsp4_fck_data = {
+	.mux = &mcbsp4_mux_fck_data,
+	.gate = &mcbsp4_gate_fck_data,
+};
+
+static struct ti_clk mcbsp4_fck = {
+	.name = "mcbsp4_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &mcbsp4_fck_data,
+};
+
+static struct ti_clk_gate gpio2_dbck_data = {
+	.parent = "per_32k_alwon_fck",
+	.bit_shift = 13,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio2_dbck = {
+	.name = "gpio2_dbck",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio2_dbck_data,
+};
+
+static struct ti_clk_gate usbtll_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 2,
+	.reg = 0xa18,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usbtll_ick = {
+	.name = "usbtll_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &usbtll_ick_data,
+};
+
+static struct ti_clk_gate mcspi4_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 21,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi4_ick = {
+	.name = "mcspi4_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcspi4_ick_data,
+};
+
+static struct ti_clk_gate dss_96m_fck_data = {
+	.parent = "omap_96m_fck",
+	.bit_shift = 2,
+	.reg = 0xe00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss_96m_fck = {
+	.name = "dss_96m_fck",
+	.clkdm_name = "dss_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &dss_96m_fck_data,
+};
+
+static struct ti_clk_divider rm_ick_data = {
+	.parent = "l4_ick",
+	.bit_shift = 1,
+	.max_div = 3,
+	.reg = 0xc40,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk rm_ick = {
+	.name = "rm_ick",
+	.type = TI_CLK_DIVIDER,
+	.data = &rm_ick_data,
+};
+
+static struct ti_clk_gate hdq_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 22,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk hdq_ick = {
+	.name = "hdq_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &hdq_ick_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_x2_ck_data = {
+	.parent = "dpll3_ck",
+	.div = 1,
+	.mult = 2,
+};
+
+static struct ti_clk dpll3_x2_ck = {
+	.name = "dpll3_x2_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll3_x2_ck_data,
+};
+
+static struct ti_clk_gate mad2d_ick_data = {
+	.parent = "l3_ick",
+	.bit_shift = 3,
+	.reg = 0xa18,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mad2d_ick = {
+	.name = "mad2d_ick",
+	.clkdm_name = "d2d_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mad2d_ick_data,
+};
+
+static struct ti_clk_gate fshostusb_fck_data = {
+	.parent = "core_48m_fck",
+	.bit_shift = 5,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk fshostusb_fck = {
+	.name = "fshostusb_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &fshostusb_fck_data,
+};
+
+static struct ti_clk_gate sr1_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 6,
+	.reg = 0xc00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk sr1_fck = {
+	.name = "sr1_fck",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &sr1_fck_data,
+};
+
+static struct ti_clk_gate des2_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 26,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk des2_ick = {
+	.name = "des2_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &des2_ick_data,
+};
+
+static struct ti_clk_gate sdrc_ick_data = {
+	.parent = "core_l3_ick",
+	.bit_shift = 1,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk sdrc_ick = {
+	.name = "sdrc_ick",
+	.clkdm_name = "core_l3_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &sdrc_ick_data,
+};
+
+static struct ti_clk_composite gpt4_fck_data = {
+	.mux = &gpt4_mux_fck_data,
+	.gate = &gpt4_gate_fck_data,
+};
+
+static struct ti_clk gpt4_fck = {
+	.name = "gpt4_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt4_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m3x2_ck_omap36xx_data = {
+	.parent = "dpll4_m3x2_mul_ck",
+	.bit_shift = 0x1c,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m3x2_ck_omap36xx = {
+	.name = "dpll4_m3x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll4_m3x2_ck_omap36xx_data,
+	.patch = &dpll4_m3x2_ck,
+};
+
+static struct ti_clk_gate cpefuse_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 0,
+	.reg = 0xa08,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk cpefuse_fck = {
+	.name = "cpefuse_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &cpefuse_fck_data,
+};
+
+static struct ti_clk_gate mcspi3_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 20,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi3_ick = {
+	.name = "mcspi3_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcspi3_ick_data,
+};
+
+static struct ti_clk_fixed_factor ssi_sst_fck_3430es2_data = {
+	.parent = "ssi_ssr_fck",
+	.div = 2,
+	.mult = 1,
+};
+
+static struct ti_clk ssi_sst_fck_3430es2 = {
+	.name = "ssi_sst_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &ssi_sst_fck_3430es2_data,
+};
+
+static struct ti_clk_gate gpio1_dbck_data = {
+	.parent = "wkup_32k_fck",
+	.bit_shift = 3,
+	.reg = 0xc00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio1_dbck = {
+	.name = "gpio1_dbck",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio1_dbck_data,
+};
+
+static struct ti_clk_gate gpt4_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 5,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt4_ick = {
+	.name = "gpt4_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt4_ick_data,
+};
+
+static struct ti_clk_gate gpt2_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 3,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt2_ick = {
+	.name = "gpt2_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt2_ick_data,
+};
+
+static struct ti_clk_gate mmchs1_fck_data = {
+	.parent = "core_96m_fck",
+	.bit_shift = 24,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs1_fck = {
+	.name = "mmchs1_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mmchs1_fck_data,
+};
+
+static struct ti_clk_fixed dummy_apb_pclk_data = {
+	.frequency = 0x0,
+};
+
+static struct ti_clk dummy_apb_pclk = {
+	.name = "dummy_apb_pclk",
+	.type = TI_CLK_FIXED,
+	.data = &dummy_apb_pclk_data,
+};
+
+static struct ti_clk_gate gpio6_dbck_data = {
+	.parent = "per_32k_alwon_fck",
+	.bit_shift = 17,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio6_dbck = {
+	.name = "gpio6_dbck",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio6_dbck_data,
+};
+
+static struct ti_clk_gate uart2_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 14,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart2_ick = {
+	.name = "uart2_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &uart2_ick_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_x2_ck_data = {
+	.parent = "dpll4_ck",
+	.div = 1,
+	.mult = 2,
+};
+
+static struct ti_clk dpll4_x2_ck = {
+	.name = "dpll4_x2_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &dpll4_x2_ck_data,
+};
+
+static struct ti_clk_gate gpt7_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 8,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt7_ick = {
+	.name = "gpt7_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt7_ick_data,
+};
+
+static struct ti_clk_gate dss_tv_fck_data = {
+	.parent = "omap_54m_fck",
+	.bit_shift = 2,
+	.reg = 0xe00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss_tv_fck = {
+	.name = "dss_tv_fck",
+	.clkdm_name = "dss_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &dss_tv_fck_data,
+};
+
+static struct ti_clk_gate mcbsp5_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 10,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp5_ick = {
+	.name = "mcbsp5_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcbsp5_ick_data,
+};
+
+static struct ti_clk_gate mcspi1_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 18,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi1_ick = {
+	.name = "mcspi1_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcspi1_ick_data,
+};
+
+static struct ti_clk_gate d2d_26m_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 3,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk d2d_26m_fck = {
+	.name = "d2d_26m_fck",
+	.clkdm_name = "d2d_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &d2d_26m_fck_data,
+};
+
+static struct ti_clk_gate wdt3_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 12,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt3_ick = {
+	.name = "wdt3_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &wdt3_ick_data,
+};
+
+static struct ti_clk_divider pclkx2_fck_data = {
+	.parent = "emu_src_ck",
+	.bit_shift = 6,
+	.max_div = 3,
+	.reg = 0x1140,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk pclkx2_fck = {
+	.name = "pclkx2_fck",
+	.type = TI_CLK_DIVIDER,
+	.data = &pclkx2_fck_data,
+};
+
+static struct ti_clk_gate sha12_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 27,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sha12_ick = {
+	.name = "sha12_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &sha12_ick_data,
+};
+
+static struct ti_clk_gate emac_fck_data = {
+	.parent = "rmii_ck",
+	.bit_shift = 9,
+	.reg = 0x59c,
+	.module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk emac_fck = {
+	.name = "emac_fck",
+	.type = TI_CLK_GATE,
+	.data = &emac_fck_data,
+};
+
+static struct ti_clk_composite gpt10_fck_data = {
+	.mux = &gpt10_mux_fck_data,
+	.gate = &gpt10_gate_fck_data,
+};
+
+static struct ti_clk gpt10_fck = {
+	.name = "gpt10_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt10_fck_data,
+};
+
+static struct ti_clk_gate wdt2_fck_data = {
+	.parent = "wkup_32k_fck",
+	.bit_shift = 5,
+	.reg = 0xc00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk wdt2_fck = {
+	.name = "wdt2_fck",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &wdt2_fck_data,
+};
+
+static struct ti_clk_gate cam_ick_data = {
+	.parent = "l4_ick",
+	.bit_shift = 0,
+	.reg = 0xf10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk cam_ick = {
+	.name = "cam_ick",
+	.clkdm_name = "cam_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &cam_ick_data,
+};
+
+static struct ti_clk_gate ssi_ick_3430es2_data = {
+	.parent = "ssi_l4_ick",
+	.bit_shift = 0,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_SSI | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk ssi_ick_3430es2 = {
+	.name = "ssi_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &ssi_ick_3430es2_data,
+};
+
+static struct ti_clk_gate gpio4_ick_data = {
+	.parent = "per_l4_ick",
+	.bit_shift = 15,
+	.reg = 0x1010,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio4_ick = {
+	.name = "gpio4_ick",
+	.clkdm_name = "per_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpio4_ick_data,
+};
+
+static struct ti_clk_gate wdt1_ick_data = {
+	.parent = "wkup_l4_ick",
+	.bit_shift = 4,
+	.reg = 0xc10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt1_ick = {
+	.name = "wdt1_ick",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &wdt1_ick_data,
+};
+
+static struct ti_clk_gate rng_ick_data = {
+	.parent = "security_l4_ick2",
+	.bit_shift = 2,
+	.reg = 0xa14,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk rng_ick = {
+	.name = "rng_ick",
+	.type = TI_CLK_GATE,
+	.data = &rng_ick_data,
+};
+
+static struct ti_clk_gate icr_ick_data = {
+	.parent = "core_l4_ick",
+	.bit_shift = 29,
+	.reg = 0xa10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk icr_ick = {
+	.name = "icr_ick",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &icr_ick_data,
+};
+
+static struct ti_clk_gate sgx_ick_data = {
+	.parent = "l3_ick",
+	.bit_shift = 0,
+	.reg = 0xb10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk sgx_ick = {
+	.name = "sgx_ick",
+	.clkdm_name = "sgx_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &sgx_ick_data,
+};
+
+static struct ti_clk_divider sys_clkout2_data = {
+	.parent = "clkout2_src_ck",
+	.bit_shift = 3,
+	.max_div = 64,
+	.reg = 0xd70,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_INDEX_POWER_OF_TWO,
+};
+
+static struct ti_clk sys_clkout2 = {
+	.name = "sys_clkout2",
+	.type = TI_CLK_DIVIDER,
+	.data = &sys_clkout2_data,
+};
+
+static struct ti_clk_alias omap34xx_omap36xx_clks[] = {
+	CLK(NULL, "security_l4_ick2", &security_l4_ick2),
+	CLK(NULL, "aes1_ick", &aes1_ick),
+	CLK("omap_rng", "ick", &rng_ick),
+	CLK("omap3-rom-rng", "ick", &rng_ick),
+	CLK(NULL, "sha11_ick", &sha11_ick),
+	CLK(NULL, "des1_ick", &des1_ick),
+	CLK(NULL, "cam_mclk", &cam_mclk),
+	CLK(NULL, "cam_ick", &cam_ick),
+	CLK(NULL, "csi2_96m_fck", &csi2_96m_fck),
+	CLK(NULL, "security_l3_ick", &security_l3_ick),
+	CLK(NULL, "pka_ick", &pka_ick),
+	CLK(NULL, "icr_ick", &icr_ick),
+	CLK(NULL, "des2_ick", &des2_ick),
+	CLK(NULL, "mspro_ick", &mspro_ick),
+	CLK(NULL, "mailboxes_ick", &mailboxes_ick),
+	CLK(NULL, "ssi_l4_ick", &ssi_l4_ick),
+	CLK(NULL, "sr1_fck", &sr1_fck),
+	CLK(NULL, "sr2_fck", &sr2_fck),
+	CLK(NULL, "sr_l4_ick", &sr_l4_ick),
+	CLK(NULL, "dpll2_fck", &dpll2_fck),
+	CLK(NULL, "dpll2_ck", &dpll2_ck),
+	CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck),
+	CLK(NULL, "iva2_ck", &iva2_ck),
+	CLK(NULL, "modem_fck", &modem_fck),
+	CLK(NULL, "sad2d_ick", &sad2d_ick),
+	CLK(NULL, "mad2d_ick", &mad2d_ick),
+	CLK(NULL, "mspro_fck", &mspro_fck),
+	{ NULL },
+};
+
+static struct ti_clk_alias omap36xx_omap3430es2plus_clks[] = {
+	CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2),
+	CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2),
+	CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2),
+	CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2),
+	CLK(NULL, "ssi_ick", &ssi_ick_3430es2),
+	CLK(NULL, "sys_d2_ck", &sys_d2_ck),
+	CLK(NULL, "omap_96m_d2_fck", &omap_96m_d2_fck),
+	CLK(NULL, "omap_96m_d4_fck", &omap_96m_d4_fck),
+	CLK(NULL, "omap_96m_d8_fck", &omap_96m_d8_fck),
+	CLK(NULL, "omap_96m_d10_fck", &omap_96m_d10_fck),
+	CLK(NULL, "dpll5_m2_d4_ck", &dpll5_m2_d4_ck),
+	CLK(NULL, "dpll5_m2_d8_ck", &dpll5_m2_d8_ck),
+	CLK(NULL, "dpll5_m2_d16_ck", &dpll5_m2_d16_ck),
+	CLK(NULL, "dpll5_m2_d20_ck", &dpll5_m2_d20_ck),
+	CLK(NULL, "usim_fck", &usim_fck),
+	CLK(NULL, "usim_ick", &usim_ick),
+	{ NULL },
+};
+
+static struct ti_clk_alias omap3xxx_clks[] = {
+	CLK(NULL, "apb_pclk", &dummy_apb_pclk),
+	CLK(NULL, "omap_32k_fck", &omap_32k_fck),
+	CLK(NULL, "virt_12m_ck", &virt_12m_ck),
+	CLK(NULL, "virt_13m_ck", &virt_13m_ck),
+	CLK(NULL, "virt_19200000_ck", &virt_19200000_ck),
+	CLK(NULL, "virt_26000000_ck", &virt_26000000_ck),
+	CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck),
+	CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck),
+	CLK(NULL, "osc_sys_ck", &osc_sys_ck),
+	CLK("twl", "fck", &osc_sys_ck),
+	CLK(NULL, "sys_ck", &sys_ck),
+	CLK(NULL, "timer_sys_ck", &sys_ck),
+	CLK(NULL, "dpll4_ck", &dpll4_ck),
+	CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck),
+	CLK(NULL, "dpll4_m2x2_mul_ck", &dpll4_m2x2_mul_ck),
+	CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck),
+	CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck),
+	CLK(NULL, "dpll3_ck", &dpll3_ck),
+	CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck),
+	CLK(NULL, "dpll3_m3x2_mul_ck", &dpll3_m3x2_mul_ck),
+	CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck),
+	CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck),
+	CLK(NULL, "sys_altclk", &sys_altclk),
+	CLK(NULL, "mcbsp_clks", &mcbsp_clks),
+	CLK(NULL, "sys_clkout1", &sys_clkout1),
+	CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck),
+	CLK(NULL, "core_ck", &core_ck),
+	CLK(NULL, "dpll1_fck", &dpll1_fck),
+	CLK(NULL, "dpll1_ck", &dpll1_ck),
+	CLK(NULL, "cpufreq_ck", &dpll1_ck),
+	CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck),
+	CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck),
+	CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck),
+	CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck),
+	CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck),
+	CLK(NULL, "cm_96m_fck", &cm_96m_fck),
+	CLK(NULL, "omap_96m_fck", &omap_96m_fck),
+	CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck),
+	CLK(NULL, "dpll4_m3x2_mul_ck", &dpll4_m3x2_mul_ck),
+	CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck),
+	CLK(NULL, "omap_54m_fck", &omap_54m_fck),
+	CLK(NULL, "cm_96m_d2_fck", &cm_96m_d2_fck),
+	CLK(NULL, "omap_48m_fck", &omap_48m_fck),
+	CLK(NULL, "omap_12m_fck", &omap_12m_fck),
+	CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck),
+	CLK(NULL, "dpll4_m4x2_mul_ck", &dpll4_m4x2_mul_ck),
+	CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck),
+	CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck),
+	CLK(NULL, "dpll4_m5x2_mul_ck", &dpll4_m5x2_mul_ck),
+	CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck),
+	CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck),
+	CLK(NULL, "dpll4_m6x2_mul_ck", &dpll4_m6x2_mul_ck),
+	CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck),
+	CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck),
+	CLK(NULL, "clkout2_src_ck", &clkout2_src_ck),
+	CLK(NULL, "sys_clkout2", &sys_clkout2),
+	CLK(NULL, "corex2_fck", &corex2_fck),
+	CLK(NULL, "mpu_ck", &mpu_ck),
+	CLK(NULL, "arm_fck", &arm_fck),
+	CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
+	CLK(NULL, "l3_ick", &l3_ick),
+	CLK(NULL, "l4_ick", &l4_ick),
+	CLK(NULL, "rm_ick", &rm_ick),
+	CLK(NULL, "timer_32k_ck", &omap_32k_fck),
+	CLK(NULL, "gpt10_fck", &gpt10_fck),
+	CLK(NULL, "gpt11_fck", &gpt11_fck),
+	CLK(NULL, "core_96m_fck", &core_96m_fck),
+	CLK(NULL, "mmchs2_fck", &mmchs2_fck),
+	CLK(NULL, "mmchs1_fck", &mmchs1_fck),
+	CLK(NULL, "i2c3_fck", &i2c3_fck),
+	CLK(NULL, "i2c2_fck", &i2c2_fck),
+	CLK(NULL, "i2c1_fck", &i2c1_fck),
+	CLK(NULL, "mcbsp5_fck", &mcbsp5_fck),
+	CLK(NULL, "mcbsp1_fck", &mcbsp1_fck),
+	CLK(NULL, "core_48m_fck", &core_48m_fck),
+	CLK(NULL, "mcspi4_fck", &mcspi4_fck),
+	CLK(NULL, "mcspi3_fck", &mcspi3_fck),
+	CLK(NULL, "mcspi2_fck", &mcspi2_fck),
+	CLK(NULL, "mcspi1_fck", &mcspi1_fck),
+	CLK(NULL, "uart2_fck", &uart2_fck),
+	CLK(NULL, "uart1_fck", &uart1_fck),
+	CLK(NULL, "core_12m_fck", &core_12m_fck),
+	CLK("omap_hdq.0", "fck", &hdq_fck),
+	CLK(NULL, "hdq_fck", &hdq_fck),
+	CLK(NULL, "core_l3_ick", &core_l3_ick),
+	CLK(NULL, "sdrc_ick", &sdrc_ick),
+	CLK(NULL, "gpmc_fck", &gpmc_fck),
+	CLK(NULL, "core_l4_ick", &core_l4_ick),
+	CLK("omap_hsmmc.1", "ick", &mmchs2_ick),
+	CLK("omap_hsmmc.0", "ick", &mmchs1_ick),
+	CLK(NULL, "mmchs2_ick", &mmchs2_ick),
+	CLK(NULL, "mmchs1_ick", &mmchs1_ick),
+	CLK("omap_hdq.0", "ick", &hdq_ick),
+	CLK(NULL, "hdq_ick", &hdq_ick),
+	CLK("omap2_mcspi.4", "ick", &mcspi4_ick),
+	CLK("omap2_mcspi.3", "ick", &mcspi3_ick),
+	CLK("omap2_mcspi.2", "ick", &mcspi2_ick),
+	CLK("omap2_mcspi.1", "ick", &mcspi1_ick),
+	CLK(NULL, "mcspi4_ick", &mcspi4_ick),
+	CLK(NULL, "mcspi3_ick", &mcspi3_ick),
+	CLK(NULL, "mcspi2_ick", &mcspi2_ick),
+	CLK(NULL, "mcspi1_ick", &mcspi1_ick),
+	CLK("omap_i2c.3", "ick", &i2c3_ick),
+	CLK("omap_i2c.2", "ick", &i2c2_ick),
+	CLK("omap_i2c.1", "ick", &i2c1_ick),
+	CLK(NULL, "i2c3_ick", &i2c3_ick),
+	CLK(NULL, "i2c2_ick", &i2c2_ick),
+	CLK(NULL, "i2c1_ick", &i2c1_ick),
+	CLK(NULL, "uart2_ick", &uart2_ick),
+	CLK(NULL, "uart1_ick", &uart1_ick),
+	CLK(NULL, "gpt11_ick", &gpt11_ick),
+	CLK(NULL, "gpt10_ick", &gpt10_ick),
+	CLK("omap-mcbsp.5", "ick", &mcbsp5_ick),
+	CLK("omap-mcbsp.1", "ick", &mcbsp1_ick),
+	CLK(NULL, "mcbsp5_ick", &mcbsp5_ick),
+	CLK(NULL, "mcbsp1_ick", &mcbsp1_ick),
+	CLK(NULL, "omapctrl_ick", &omapctrl_ick),
+	CLK(NULL, "dss_tv_fck", &dss_tv_fck),
+	CLK(NULL, "dss_96m_fck", &dss_96m_fck),
+	CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck),
+	CLK(NULL, "init_60m_fclk", &dummy_ck),
+	CLK(NULL, "gpt1_fck", &gpt1_fck),
+	CLK(NULL, "aes2_ick", &aes2_ick),
+	CLK(NULL, "wkup_32k_fck", &wkup_32k_fck),
+	CLK(NULL, "gpio1_dbck", &gpio1_dbck),
+	CLK(NULL, "sha12_ick", &sha12_ick),
+	CLK(NULL, "wdt2_fck", &wdt2_fck),
+	CLK(NULL, "wkup_l4_ick", &wkup_l4_ick),
+	CLK("omap_wdt", "ick", &wdt2_ick),
+	CLK(NULL, "wdt2_ick", &wdt2_ick),
+	CLK(NULL, "wdt1_ick", &wdt1_ick),
+	CLK(NULL, "gpio1_ick", &gpio1_ick),
+	CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick),
+	CLK(NULL, "gpt12_ick", &gpt12_ick),
+	CLK(NULL, "gpt1_ick", &gpt1_ick),
+	CLK(NULL, "per_96m_fck", &per_96m_fck),
+	CLK(NULL, "per_48m_fck", &per_48m_fck),
+	CLK(NULL, "uart3_fck", &uart3_fck),
+	CLK(NULL, "gpt2_fck", &gpt2_fck),
+	CLK(NULL, "gpt3_fck", &gpt3_fck),
+	CLK(NULL, "gpt4_fck", &gpt4_fck),
+	CLK(NULL, "gpt5_fck", &gpt5_fck),
+	CLK(NULL, "gpt6_fck", &gpt6_fck),
+	CLK(NULL, "gpt7_fck", &gpt7_fck),
+	CLK(NULL, "gpt8_fck", &gpt8_fck),
+	CLK(NULL, "gpt9_fck", &gpt9_fck),
+	CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck),
+	CLK(NULL, "gpio6_dbck", &gpio6_dbck),
+	CLK(NULL, "gpio5_dbck", &gpio5_dbck),
+	CLK(NULL, "gpio4_dbck", &gpio4_dbck),
+	CLK(NULL, "gpio3_dbck", &gpio3_dbck),
+	CLK(NULL, "gpio2_dbck", &gpio2_dbck),
+	CLK(NULL, "wdt3_fck", &wdt3_fck),
+	CLK(NULL, "per_l4_ick", &per_l4_ick),
+	CLK(NULL, "gpio6_ick", &gpio6_ick),
+	CLK(NULL, "gpio5_ick", &gpio5_ick),
+	CLK(NULL, "gpio4_ick", &gpio4_ick),
+	CLK(NULL, "gpio3_ick", &gpio3_ick),
+	CLK(NULL, "gpio2_ick", &gpio2_ick),
+	CLK(NULL, "wdt3_ick", &wdt3_ick),
+	CLK(NULL, "uart3_ick", &uart3_ick),
+	CLK(NULL, "uart4_ick", &uart4_ick),
+	CLK(NULL, "gpt9_ick", &gpt9_ick),
+	CLK(NULL, "gpt8_ick", &gpt8_ick),
+	CLK(NULL, "gpt7_ick", &gpt7_ick),
+	CLK(NULL, "gpt6_ick", &gpt6_ick),
+	CLK(NULL, "gpt5_ick", &gpt5_ick),
+	CLK(NULL, "gpt4_ick", &gpt4_ick),
+	CLK(NULL, "gpt3_ick", &gpt3_ick),
+	CLK(NULL, "gpt2_ick", &gpt2_ick),
+	CLK("omap-mcbsp.2", "ick", &mcbsp2_ick),
+	CLK("omap-mcbsp.3", "ick", &mcbsp3_ick),
+	CLK("omap-mcbsp.4", "ick", &mcbsp4_ick),
+	CLK(NULL, "mcbsp4_ick", &mcbsp2_ick),
+	CLK(NULL, "mcbsp3_ick", &mcbsp3_ick),
+	CLK(NULL, "mcbsp2_ick", &mcbsp4_ick),
+	CLK(NULL, "mcbsp2_fck", &mcbsp2_fck),
+	CLK(NULL, "mcbsp3_fck", &mcbsp3_fck),
+	CLK(NULL, "mcbsp4_fck", &mcbsp4_fck),
+	CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
+	CLK("etb", "emu_src_ck", &emu_src_ck),
+	CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
+	CLK(NULL, "emu_src_ck", &emu_src_ck),
+	CLK(NULL, "pclk_fck", &pclk_fck),
+	CLK(NULL, "pclkx2_fck", &pclkx2_fck),
+	CLK(NULL, "atclk_fck", &atclk_fck),
+	CLK(NULL, "traceclk_src_fck", &traceclk_src_fck),
+	CLK(NULL, "traceclk_fck", &traceclk_fck),
+	CLK(NULL, "secure_32k_fck", &secure_32k_fck),
+	CLK(NULL, "gpt12_fck", &gpt12_fck),
+	CLK(NULL, "wdt1_fck", &wdt1_fck),
+	{ NULL },
+};
+
+static struct ti_clk_alias omap36xx_am35xx_omap3430es2plus_clks[] = {
+	CLK(NULL, "dpll5_ck", &dpll5_ck),
+	CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck),
+	CLK(NULL, "core_d3_ck", &core_d3_ck),
+	CLK(NULL, "core_d4_ck", &core_d4_ck),
+	CLK(NULL, "core_d6_ck", &core_d6_ck),
+	CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck),
+	CLK(NULL, "core_d2_ck", &core_d2_ck),
+	CLK(NULL, "corex2_d3_fck", &corex2_d3_fck),
+	CLK(NULL, "corex2_d5_fck", &corex2_d5_fck),
+	CLK(NULL, "sgx_fck", &sgx_fck),
+	CLK(NULL, "sgx_ick", &sgx_ick),
+	CLK(NULL, "cpefuse_fck", &cpefuse_fck),
+	CLK(NULL, "ts_fck", &ts_fck),
+	CLK(NULL, "usbtll_fck", &usbtll_fck),
+	CLK(NULL, "usbtll_ick", &usbtll_ick),
+	CLK("omap_hsmmc.2", "ick", &mmchs3_ick),
+	CLK(NULL, "mmchs3_ick", &mmchs3_ick),
+	CLK(NULL, "mmchs3_fck", &mmchs3_fck),
+	CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2),
+	CLK("omapdss_dss", "ick", &dss_ick_3430es2),
+	CLK(NULL, "dss_ick", &dss_ick_3430es2),
+	CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck),
+	CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck),
+	CLK(NULL, "usbhost_ick", &usbhost_ick),
+	{ NULL },
+};
+
+static struct ti_clk_alias omap3430es1_clks[] = {
+	CLK(NULL, "gfx_l3_ck", &gfx_l3_ck),
+	CLK(NULL, "gfx_l3_fck", &gfx_l3_fck),
+	CLK(NULL, "gfx_l3_ick", &gfx_l3_ick),
+	CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck),
+	CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck),
+	CLK(NULL, "d2d_26m_fck", &d2d_26m_fck),
+	CLK(NULL, "fshostusb_fck", &fshostusb_fck),
+	CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1),
+	CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1),
+	CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1),
+	CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1),
+	CLK(NULL, "fac_ick", &fac_ick),
+	CLK(NULL, "ssi_ick", &ssi_ick_3430es1),
+	CLK(NULL, "usb_l4_ick", &usb_l4_ick),
+	CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1),
+	CLK("omapdss_dss", "ick", &dss_ick_3430es1),
+	CLK(NULL, "dss_ick", &dss_ick_3430es1),
+	{ NULL },
+};
+
+static struct ti_clk_alias omap36xx_clks[] = {
+	CLK(NULL, "uart4_fck", &uart4_fck),
+	{ NULL },
+};
+
+static struct ti_clk_alias am35xx_clks[] = {
+	CLK(NULL, "ipss_ick", &ipss_ick),
+	CLK(NULL, "rmii_ck", &rmii_ck),
+	CLK(NULL, "pclk_ck", &pclk_ck),
+	CLK(NULL, "emac_ick", &emac_ick),
+	CLK(NULL, "emac_fck", &emac_fck),
+	CLK("davinci_emac.0", NULL, &emac_ick),
+	CLK("davinci_mdio.0", NULL, &emac_fck),
+	CLK("vpfe-capture", "master", &vpfe_ick),
+	CLK("vpfe-capture", "slave", &vpfe_fck),
+	CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx),
+	CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx),
+	CLK(NULL, "hecc_ck", &hecc_ck),
+	CLK(NULL, "uart4_ick", &uart4_ick_am35xx),
+	CLK(NULL, "uart4_fck", &uart4_fck_am35xx),
+	{ NULL },
+};
+
+static struct ti_clk *omap36xx_clk_patches[] = {
+	&dpll4_m3x2_ck_omap36xx,
+	&dpll3_m3x2_ck_omap36xx,
+	&dpll4_m6x2_ck_omap36xx,
+	&dpll4_m2x2_ck_omap36xx,
+	&dpll4_m5x2_ck_omap36xx,
+	&dpll4_ck_omap36xx,
+	NULL,
+};
+
+static const char *enable_init_clks[] = {
+	"sdrc_ick",
+	"gpmc_fck",
+	"omapctrl_ick",
+};
+
+static void __init omap3_clk_legacy_common_init(void)
+{
+	omap2_clk_disable_autoidle_all();
+
+	omap2_clk_enable_init_clocks(enable_init_clks,
+				     ARRAY_SIZE(enable_init_clks));
+
+	pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
+		(clk_get_rate(osc_sys_ck.clk) / 1000000),
+		(clk_get_rate(osc_sys_ck.clk) / 100000) % 10,
+		(clk_get_rate(core_ck.clk) / 1000000),
+		(clk_get_rate(arm_fck.clk) / 1000000));
+}
+
+int __init omap3430es1_clk_legacy_init(void)
+{
+	int r;
+
+	r = ti_clk_register_legacy_clks(omap3430es1_clks);
+	r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+	r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+	omap3_clk_legacy_common_init();
+
+	return r;
+}
+
+int __init omap3430_clk_legacy_init(void)
+{
+	int r;
+
+	r = ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+	r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
+	r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+	r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+	omap3_clk_legacy_common_init();
+	omap3_clk_lock_dpll5();
+
+	return r;
+}
+
+int __init omap36xx_clk_legacy_init(void)
+{
+	int r;
+
+	ti_clk_patch_legacy_clks(omap36xx_clk_patches);
+	r = ti_clk_register_legacy_clks(omap36xx_clks);
+	r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
+	r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+	r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+	r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+	omap3_clk_legacy_common_init();
+	omap3_clk_lock_dpll5();
+
+	return r;
+}
+
+int __init am35xx_clk_legacy_init(void)
+{
+	int r;
+
+	r = ti_clk_register_legacy_clks(am35xx_clks);
+	r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+	r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+	omap3_clk_legacy_common_init();
+	omap3_clk_lock_dpll5();
+
+	return r;
+}
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 0d1750a..383a06e 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -327,7 +327,6 @@
 	OMAP3_SOC_OMAP3430_ES1,
 	OMAP3_SOC_OMAP3430_ES2_PLUS,
 	OMAP3_SOC_OMAP3630,
-	OMAP3_SOC_TI81XX,
 };
 
 static int __init omap3xxx_dt_clk_init(int soc_type)
@@ -370,7 +369,7 @@
 		(clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
 		(clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
 
-	if (soc_type != OMAP3_SOC_TI81XX && soc_type != OMAP3_SOC_OMAP3430_ES1)
+	if (soc_type != OMAP3_SOC_OMAP3430_ES1)
 		omap3_clk_lock_dpll5();
 
 	return 0;
@@ -390,8 +389,3 @@
 {
 	return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
 }
-
-int __init ti81xx_dt_clk_init(void)
-{
-	return omap3xxx_dt_clk_init(OMAP3_SOC_TI81XX);
-}
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 02517a8..4f4c877 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 5e18399..14160b2 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/clk/ti.h>
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 62ac8f6..ee32f4de 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
new file mode 100644
index 0000000..9451e65
--- /dev/null
+++ b/drivers/clk/ti/clk-816x.c
@@ -0,0 +1,53 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk dm816x_clks[] = {
+	DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
+	DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+	DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+	DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+	DT_CLK(NULL, "timer1_fck", "timer1_fck"),
+	DT_CLK(NULL, "timer2_fck", "timer2_fck"),
+	DT_CLK(NULL, "timer3_fck", "timer3_fck"),
+	DT_CLK(NULL, "timer4_fck", "timer4_fck"),
+	DT_CLK(NULL, "timer5_fck", "timer5_fck"),
+	DT_CLK(NULL, "timer6_fck", "timer6_fck"),
+	DT_CLK(NULL, "timer7_fck", "timer7_fck"),
+	DT_CLK(NULL, "sysclk4_ck", "sysclk4_ck"),
+	DT_CLK(NULL, "sysclk5_ck", "sysclk5_ck"),
+	DT_CLK(NULL, "sysclk6_ck", "sysclk6_ck"),
+	DT_CLK(NULL, "sysclk10_ck", "sysclk10_ck"),
+	DT_CLK(NULL, "sysclk18_ck", "sysclk18_ck"),
+	DT_CLK(NULL, "sysclk24_ck", "sysclk24_ck"),
+	DT_CLK("4a100000.ethernet", "sysclk24_ck", "sysclk24_ck"),
+	{ .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+	"ddr_pll_clk1",
+	"ddr_pll_clk2",
+	"ddr_pll_clk3",
+};
+
+int __init ti81xx_dt_clk_init(void)
+{
+	ti_dt_clocks_register(dm816x_clks);
+	omap2_clk_disable_autoidle_all();
+	omap2_clk_enable_init_clocks(enable_init_clks,
+				     ARRAY_SIZE(enable_init_clks));
+
+	return 0;
+}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 337abe5..e22b956 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/list.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
@@ -183,3 +185,128 @@
 		retries--;
 	}
 }
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+void __init ti_clk_patch_legacy_clks(struct ti_clk **patch)
+{
+	while (*patch) {
+		memcpy((*patch)->patch, *patch, sizeof(**patch));
+		patch++;
+	}
+}
+
+struct clk __init *ti_clk_register_clk(struct ti_clk *setup)
+{
+	struct clk *clk;
+	struct ti_clk_fixed *fixed;
+	struct ti_clk_fixed_factor *fixed_factor;
+	struct clk_hw *clk_hw;
+
+	if (setup->clk)
+		return setup->clk;
+
+	switch (setup->type) {
+	case TI_CLK_FIXED:
+		fixed = setup->data;
+
+		clk = clk_register_fixed_rate(NULL, setup->name, NULL,
+					      CLK_IS_ROOT, fixed->frequency);
+		break;
+	case TI_CLK_MUX:
+		clk = ti_clk_register_mux(setup);
+		break;
+	case TI_CLK_DIVIDER:
+		clk = ti_clk_register_divider(setup);
+		break;
+	case TI_CLK_COMPOSITE:
+		clk = ti_clk_register_composite(setup);
+		break;
+	case TI_CLK_FIXED_FACTOR:
+		fixed_factor = setup->data;
+
+		clk = clk_register_fixed_factor(NULL, setup->name,
+						fixed_factor->parent,
+						0, fixed_factor->mult,
+						fixed_factor->div);
+		break;
+	case TI_CLK_GATE:
+		clk = ti_clk_register_gate(setup);
+		break;
+	case TI_CLK_DPLL:
+		clk = ti_clk_register_dpll(setup);
+		break;
+	default:
+		pr_err("bad type for %s!\n", setup->name);
+		clk = ERR_PTR(-EINVAL);
+	}
+
+	if (!IS_ERR(clk)) {
+		setup->clk = clk;
+		if (setup->clkdm_name) {
+			if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+				pr_warn("can't setup clkdm for basic clk %s\n",
+					setup->name);
+			} else {
+				clk_hw = __clk_get_hw(clk);
+				to_clk_hw_omap(clk_hw)->clkdm_name =
+					setup->clkdm_name;
+				omap2_init_clk_clkdm(clk_hw);
+			}
+		}
+	}
+
+	return clk;
+}
+
+int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks)
+{
+	struct clk *clk;
+	bool retry;
+	struct ti_clk_alias *retry_clk;
+	struct ti_clk_alias *tmp;
+
+	while (clks->clk) {
+		clk = ti_clk_register_clk(clks->clk);
+		if (IS_ERR(clk)) {
+			if (PTR_ERR(clk) == -EAGAIN) {
+				list_add(&clks->link, &retry_list);
+			} else {
+				pr_err("register for %s failed: %ld\n",
+				       clks->clk->name, PTR_ERR(clk));
+				return PTR_ERR(clk);
+			}
+		} else {
+			clks->lk.clk = clk;
+			clkdev_add(&clks->lk);
+		}
+		clks++;
+	}
+
+	retry = true;
+
+	while (!list_empty(&retry_list) && retry) {
+		retry = false;
+		list_for_each_entry_safe(retry_clk, tmp, &retry_list, link) {
+			pr_debug("retry-init: %s\n", retry_clk->clk->name);
+			clk = ti_clk_register_clk(retry_clk->clk);
+			if (IS_ERR(clk)) {
+				if (PTR_ERR(clk) == -EAGAIN) {
+					continue;
+				} else {
+					pr_err("register for %s failed: %ld\n",
+					       retry_clk->clk->name,
+					       PTR_ERR(clk));
+					return PTR_ERR(clk);
+				}
+			} else {
+				retry = true;
+				retry_clk->lk.clk = clk;
+				clkdev_add(&retry_clk->lk);
+				list_del(&retry_clk->link);
+			}
+		}
+	}
+
+	return 0;
+}
+#endif
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
new file mode 100644
index 0000000..404158d
--- /dev/null
+++ b/drivers/clk/ti/clock.h
@@ -0,0 +1,172 @@
+/*
+ * TI Clock driver internal definitions
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DRIVERS_CLK_TI_CLOCK__
+#define __DRIVERS_CLK_TI_CLOCK__
+
+enum {
+	TI_CLK_FIXED,
+	TI_CLK_MUX,
+	TI_CLK_DIVIDER,
+	TI_CLK_COMPOSITE,
+	TI_CLK_FIXED_FACTOR,
+	TI_CLK_GATE,
+	TI_CLK_DPLL,
+};
+
+/* Global flags */
+#define CLKF_INDEX_POWER_OF_TWO		(1 << 0)
+#define CLKF_INDEX_STARTS_AT_ONE	(1 << 1)
+#define CLKF_SET_RATE_PARENT		(1 << 2)
+#define CLKF_OMAP3			(1 << 3)
+#define CLKF_AM35XX			(1 << 4)
+
+/* Gate flags */
+#define CLKF_SET_BIT_TO_DISABLE		(1 << 5)
+#define CLKF_INTERFACE			(1 << 6)
+#define CLKF_SSI			(1 << 7)
+#define CLKF_DSS			(1 << 8)
+#define CLKF_HSOTGUSB			(1 << 9)
+#define CLKF_WAIT			(1 << 10)
+#define CLKF_NO_WAIT			(1 << 11)
+#define CLKF_HSDIV			(1 << 12)
+#define CLKF_CLKDM			(1 << 13)
+
+/* DPLL flags */
+#define CLKF_LOW_POWER_STOP		(1 << 5)
+#define CLKF_LOCK			(1 << 6)
+#define CLKF_LOW_POWER_BYPASS		(1 << 7)
+#define CLKF_PER			(1 << 8)
+#define CLKF_CORE			(1 << 9)
+#define CLKF_J_TYPE			(1 << 10)
+
+#define CLK(dev, con, ck)		\
+	{				\
+		.lk = {			\
+			.dev_id = dev,	\
+			.con_id = con,	\
+		},			\
+		.clk = ck,		\
+	}
+
+struct ti_clk {
+	const char *name;
+	const char *clkdm_name;
+	int type;
+	void *data;
+	struct ti_clk *patch;
+	struct clk *clk;
+};
+
+struct ti_clk_alias {
+	struct ti_clk *clk;
+	struct clk_lookup lk;
+	struct list_head link;
+};
+
+struct ti_clk_fixed {
+	u32 frequency;
+	u16 flags;
+};
+
+struct ti_clk_mux {
+	u8 bit_shift;
+	int num_parents;
+	u16 reg;
+	u8 module;
+	const char **parents;
+	u16 flags;
+};
+
+struct ti_clk_divider {
+	const char *parent;
+	u8 bit_shift;
+	u16 max_div;
+	u16 reg;
+	u8 module;
+	int *dividers;
+	int num_dividers;
+	u16 flags;
+};
+
+struct ti_clk_fixed_factor {
+	const char *parent;
+	u16 div;
+	u16 mult;
+	u16 flags;
+};
+
+struct ti_clk_gate {
+	const char *parent;
+	u8 bit_shift;
+	u16 reg;
+	u8 module;
+	u16 flags;
+};
+
+struct ti_clk_composite {
+	struct ti_clk_divider *divider;
+	struct ti_clk_mux *mux;
+	struct ti_clk_gate *gate;
+	u16 flags;
+};
+
+struct ti_clk_clkdm_gate {
+	const char *parent;
+	u16 flags;
+};
+
+struct ti_clk_dpll {
+	int num_parents;
+	u16 control_reg;
+	u16 idlest_reg;
+	u16 autoidle_reg;
+	u16 mult_div1_reg;
+	u8 module;
+	const char **parents;
+	u16 flags;
+	u8 modes;
+	u32 mult_mask;
+	u32 div1_mask;
+	u32 enable_mask;
+	u32 autoidle_mask;
+	u32 freqsel_mask;
+	u32 idlest_mask;
+	u32 dco_mask;
+	u32 sddiv_mask;
+	u16 max_multiplier;
+	u16 max_divider;
+	u8 min_divider;
+	u8 auto_recal_bit;
+	u8 recal_en_bit;
+	u8 recal_st_bit;
+};
+
+struct clk *ti_clk_register_gate(struct ti_clk *setup);
+struct clk *ti_clk_register_interface(struct ti_clk *setup);
+struct clk *ti_clk_register_mux(struct ti_clk *setup);
+struct clk *ti_clk_register_divider(struct ti_clk *setup);
+struct clk *ti_clk_register_composite(struct ti_clk *setup);
+struct clk *ti_clk_register_dpll(struct ti_clk *setup);
+
+struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup);
+struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup);
+struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
+
+void ti_clk_patch_legacy_clks(struct ti_clk **patch);
+struct clk *ti_clk_register_clk(struct ti_clk *setup);
+int ti_clk_register_legacy_clks(struct ti_clk_alias *clks);
+
+#endif
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 19d8980..3654f61 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -23,6 +23,8 @@
 #include <linux/clk/ti.h>
 #include <linux/list.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
@@ -116,8 +118,46 @@
 
 #define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
 
-static void __init ti_clk_register_composite(struct clk_hw *hw,
-					     struct device_node *node)
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_composite(struct ti_clk *setup)
+{
+	struct ti_clk_composite *comp;
+	struct clk_hw *gate;
+	struct clk_hw *mux;
+	struct clk_hw *div;
+	int num_parents = 1;
+	const char **parent_names = NULL;
+	struct clk *clk;
+
+	comp = setup->data;
+
+	div = ti_clk_build_component_div(comp->divider);
+	gate = ti_clk_build_component_gate(comp->gate);
+	mux = ti_clk_build_component_mux(comp->mux);
+
+	if (div)
+		parent_names = &comp->divider->parent;
+
+	if (gate)
+		parent_names = &comp->gate->parent;
+
+	if (mux) {
+		num_parents = comp->mux->num_parents;
+		parent_names = comp->mux->parents;
+	}
+
+	clk = clk_register_composite(NULL, setup->name,
+				     parent_names, num_parents, mux,
+				     &ti_clk_mux_ops, div,
+				     &ti_composite_divider_ops, gate,
+				     &ti_composite_gate_ops, 0);
+
+	return clk;
+}
+#endif
+
+static void __init _register_composite(struct clk_hw *hw,
+				       struct device_node *node)
 {
 	struct clk *clk;
 	struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw);
@@ -136,7 +176,7 @@
 			pr_debug("component %s not ready for %s, retry\n",
 				 cclk->comp_nodes[i]->name, node->name);
 			if (!ti_clk_retry_init(node, hw,
-					       ti_clk_register_composite))
+					       _register_composite))
 				return;
 
 			goto cleanup;
@@ -216,7 +256,7 @@
 	for (i = 0; i < num_clks; i++)
 		cclk->comp_nodes[i] = _get_component_node(node, i);
 
-	ti_clk_register_composite(&cclk->hw, node);
+	_register_composite(&cclk->hw, node);
 }
 CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
 	       of_ti_composite_clk_setup);
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index bff2b5b..6211893 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -301,6 +302,134 @@
 }
 
 static struct clk_div_table *
+_get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width)
+{
+	int valid_div = 0;
+	struct clk_div_table *table;
+	int i;
+	int div;
+	u32 val;
+	u8 flags;
+
+	if (!setup->num_dividers) {
+		/* Clk divider table not provided, determine min/max divs */
+		flags = setup->flags;
+
+		if (flags & CLKF_INDEX_STARTS_AT_ONE)
+			val = 1;
+		else
+			val = 0;
+
+		div = 1;
+
+		while (div < setup->max_div) {
+			if (flags & CLKF_INDEX_POWER_OF_TWO)
+				div <<= 1;
+			else
+				div++;
+			val++;
+		}
+
+		*width = fls(val);
+
+		return NULL;
+	}
+
+	for (i = 0; i < setup->num_dividers; i++)
+		if (setup->dividers[i])
+			valid_div++;
+
+	table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	valid_div = 0;
+	*width = 0;
+
+	for (i = 0; i < setup->num_dividers; i++)
+		if (setup->dividers[i]) {
+			table[valid_div].div = setup->dividers[i];
+			table[valid_div].val = i;
+			valid_div++;
+			*width = i;
+		}
+
+	*width = fls(*width);
+
+	return table;
+}
+
+struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
+{
+	struct clk_divider *div;
+	struct clk_omap_reg *reg;
+
+	if (!setup)
+		return NULL;
+
+	div = kzalloc(sizeof(*div), GFP_KERNEL);
+	if (!div)
+		return ERR_PTR(-ENOMEM);
+
+	reg = (struct clk_omap_reg *)&div->reg;
+	reg->index = setup->module;
+	reg->offset = setup->reg;
+
+	if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
+		div->flags |= CLK_DIVIDER_ONE_BASED;
+
+	if (setup->flags & CLKF_INDEX_POWER_OF_TWO)
+		div->flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+	div->table = _get_div_table_from_setup(setup, &div->width);
+
+	div->shift = setup->bit_shift;
+
+	return &div->hw;
+}
+
+struct clk *ti_clk_register_divider(struct ti_clk *setup)
+{
+	struct ti_clk_divider *div;
+	struct clk_omap_reg *reg_setup;
+	u32 reg;
+	u8 width;
+	u32 flags = 0;
+	u8 div_flags = 0;
+	struct clk_div_table *table;
+	struct clk *clk;
+
+	div = setup->data;
+
+	reg_setup = (struct clk_omap_reg *)&reg;
+
+	reg_setup->index = div->module;
+	reg_setup->offset = div->reg;
+
+	if (div->flags & CLKF_INDEX_STARTS_AT_ONE)
+		div_flags |= CLK_DIVIDER_ONE_BASED;
+
+	if (div->flags & CLKF_INDEX_POWER_OF_TWO)
+		div_flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+	if (div->flags & CLKF_SET_RATE_PARENT)
+		flags |= CLK_SET_RATE_PARENT;
+
+	table = _get_div_table_from_setup(div, &width);
+	if (IS_ERR(table))
+		return (struct clk *)table;
+
+	clk = _register_divider(NULL, setup->name, div->parent,
+				flags, (void __iomem *)reg, div->bit_shift,
+				width, div_flags, table, NULL);
+
+	if (IS_ERR(clk))
+		kfree(table);
+
+	return clk;
+}
+
+static struct clk_div_table *
 __init ti_clk_get_div_table(struct device_node *node)
 {
 	struct clk_div_table *table;
@@ -455,7 +584,8 @@
 		goto cleanup;
 
 	clk = _register_divider(NULL, node->name, parent_name, flags, reg,
-				shift, width, clk_divider_flags, table, NULL);
+				shift, width, clk_divider_flags, table,
+				NULL);
 
 	if (!IS_ERR(clk)) {
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 85ac0dd..81dc469 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -130,7 +131,7 @@
 };
 
 /**
- * ti_clk_register_dpll - low level registration of a DPLL clock
+ * _register_dpll - low level registration of a DPLL clock
  * @hw: hardware clock definition for the clock
  * @node: device node for the clock
  *
@@ -138,8 +139,8 @@
  * clk-bypass is missing), the clock is added to retry list and
  * the initialization is retried on later stage.
  */
-static void __init ti_clk_register_dpll(struct clk_hw *hw,
-					struct device_node *node)
+static void __init _register_dpll(struct clk_hw *hw,
+				  struct device_node *node)
 {
 	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
 	struct dpll_data *dd = clk_hw->dpll_data;
@@ -151,7 +152,7 @@
 	if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
 		pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
 			 node->name);
-		if (!ti_clk_retry_init(node, hw, ti_clk_register_dpll))
+		if (!ti_clk_retry_init(node, hw, _register_dpll))
 			return;
 
 		goto cleanup;
@@ -175,20 +176,118 @@
 	kfree(clk_hw);
 }
 
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+void __iomem *_get_reg(u8 module, u16 offset)
+{
+	u32 reg;
+	struct clk_omap_reg *reg_setup;
+
+	reg_setup = (struct clk_omap_reg *)&reg;
+
+	reg_setup->index = module;
+	reg_setup->offset = offset;
+
+	return (void __iomem *)reg;
+}
+
+struct clk *ti_clk_register_dpll(struct ti_clk *setup)
+{
+	struct clk_hw_omap *clk_hw;
+	struct clk_init_data init = { NULL };
+	struct dpll_data *dd;
+	struct clk *clk;
+	struct ti_clk_dpll *dpll;
+	const struct clk_ops *ops = &omap3_dpll_ck_ops;
+	struct clk *clk_ref;
+	struct clk *clk_bypass;
+
+	dpll = setup->data;
+
+	if (dpll->num_parents < 2)
+		return ERR_PTR(-EINVAL);
+
+	clk_ref = clk_get_sys(NULL, dpll->parents[0]);
+	clk_bypass = clk_get_sys(NULL, dpll->parents[1]);
+
+	if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
+		return ERR_PTR(-EAGAIN);
+
+	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+	if (!dd || !clk_hw) {
+		clk = ERR_PTR(-ENOMEM);
+		goto cleanup;
+	}
+
+	clk_hw->dpll_data = dd;
+	clk_hw->ops = &clkhwops_omap3_dpll;
+	clk_hw->hw.init = &init;
+	clk_hw->flags = MEMMAP_ADDRESSING;
+
+	init.name = setup->name;
+	init.ops = ops;
+
+	init.num_parents = dpll->num_parents;
+	init.parent_names = dpll->parents;
+
+	dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
+	dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
+	dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
+	dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);
+
+	dd->modes = dpll->modes;
+	dd->div1_mask = dpll->div1_mask;
+	dd->idlest_mask = dpll->idlest_mask;
+	dd->mult_mask = dpll->mult_mask;
+	dd->autoidle_mask = dpll->autoidle_mask;
+	dd->enable_mask = dpll->enable_mask;
+	dd->sddiv_mask = dpll->sddiv_mask;
+	dd->dco_mask = dpll->dco_mask;
+	dd->max_divider = dpll->max_divider;
+	dd->min_divider = dpll->min_divider;
+	dd->max_multiplier = dpll->max_multiplier;
+	dd->auto_recal_bit = dpll->auto_recal_bit;
+	dd->recal_en_bit = dpll->recal_en_bit;
+	dd->recal_st_bit = dpll->recal_st_bit;
+
+	dd->clk_ref = clk_ref;
+	dd->clk_bypass = clk_bypass;
+
+	if (dpll->flags & CLKF_CORE)
+		ops = &omap3_dpll_core_ck_ops;
+
+	if (dpll->flags & CLKF_PER)
+		ops = &omap3_dpll_per_ck_ops;
+
+	if (dpll->flags & CLKF_J_TYPE)
+		dd->flags |= DPLL_J_TYPE;
+
+	clk = clk_register(NULL, &clk_hw->hw);
+
+	if (!IS_ERR(clk))
+		return clk;
+
+cleanup:
+	kfree(dd);
+	kfree(clk_hw);
+	return clk;
+}
+#endif
+
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
 	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
 	defined(CONFIG_SOC_AM43XX)
 /**
- * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
+ * _register_dpll_x2 - Registers a DPLLx2 clock
  * @node: device node for this clock
  * @ops: clk_ops for this clock
  * @hw_ops: clk_hw_ops for this clock
  *
  * Initializes a DPLL x 2 clock from device tree data.
  */
-static void ti_clk_register_dpll_x2(struct device_node *node,
-				    const struct clk_ops *ops,
-				    const struct clk_hw_omap_ops *hw_ops)
+static void _register_dpll_x2(struct device_node *node,
+			      const struct clk_ops *ops,
+			      const struct clk_hw_omap_ops *hw_ops)
 {
 	struct clk *clk;
 	struct clk_init_data init = { NULL };
@@ -318,7 +417,7 @@
 	if (dpll_mode)
 		dd->modes = dpll_mode;
 
-	ti_clk_register_dpll(&clk_hw->hw, node);
+	_register_dpll(&clk_hw->hw, node);
 	return;
 
 cleanup:
@@ -332,7 +431,7 @@
 	defined(CONFIG_SOC_DRA7XX)
 static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
 {
-	ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
+	_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
 }
 CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
 	       of_ti_omap4_dpll_x2_setup);
@@ -341,7 +440,7 @@
 #if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
 static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
 {
-	ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
+	_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
 }
 CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
 	       of_ti_am3_dpll_x2_setup);
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
new file mode 100644
index 0000000..d216406
--- /dev/null
+++ b/drivers/clk/ti/fapll.c
@@ -0,0 +1,410 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <asm/div64.h>
+
+/* FAPLL Control Register PLL_CTRL */
+#define FAPLL_MAIN_LOCK		BIT(7)
+#define FAPLL_MAIN_PLLEN	BIT(3)
+#define FAPLL_MAIN_BP		BIT(2)
+#define FAPLL_MAIN_LOC_CTL	BIT(0)
+
+/* FAPLL powerdown register PWD */
+#define FAPLL_PWD_OFFSET	4
+
+#define MAX_FAPLL_OUTPUTS	7
+#define FAPLL_MAX_RETRIES	1000
+
+#define to_fapll(_hw)		container_of(_hw, struct fapll_data, hw)
+#define to_synth(_hw)		container_of(_hw, struct fapll_synth, hw)
+
+/* The bypass bit is inverted on the ddr_pll. */
+#define fapll_is_ddr_pll(va)	(((u32)(va) & 0xffff) == 0x0440)
+
+/*
+ * The audio_pll_clk1 input is hard wired to the 27MHz bypass clock,
+ * and the audio_pll_clk1 synthesizer is hardwired to a 32.768 kHz output.
+ */
+#define is_ddr_pll_clk1(va)	(((u32)(va) & 0xffff) == 0x044c)
+#define is_audio_pll_clk1(va)	(((u32)(va) & 0xffff) == 0x04a8)
+
+/* Synthesizer divider register */
+#define SYNTH_LDMDIV1		BIT(8)
+
+/* Synthesizer frequency register */
+#define SYNTH_LDFREQ		BIT(31)
+
+struct fapll_data {
+	struct clk_hw hw;
+	void __iomem *base;
+	const char *name;
+	struct clk *clk_ref;
+	struct clk *clk_bypass;
+	struct clk_onecell_data outputs;
+	bool bypass_bit_inverted;
+};
+
+struct fapll_synth {
+	struct clk_hw hw;
+	struct fapll_data *fd;
+	int index;
+	void __iomem *freq;
+	void __iomem *div;
+	const char *name;
+	struct clk *clk_pll;
+};
+
+static bool ti_fapll_clock_is_bypass(struct fapll_data *fd)
+{
+	u32 v = readl_relaxed(fd->base);
+
+	if (fd->bypass_bit_inverted)
+		return !(v & FAPLL_MAIN_BP);
+	else
+		return !!(v & FAPLL_MAIN_BP);
+}
+
+static int ti_fapll_enable(struct clk_hw *hw)
+{
+	struct fapll_data *fd = to_fapll(hw);
+	u32 v = readl_relaxed(fd->base);
+
+	v |= FAPLL_MAIN_PLLEN;
+	writel_relaxed(v, fd->base);
+
+	return 0;
+}
+
+static void ti_fapll_disable(struct clk_hw *hw)
+{
+	struct fapll_data *fd = to_fapll(hw);
+	u32 v = readl_relaxed(fd->base);
+
+	v &= ~FAPLL_MAIN_PLLEN;
+	writel_relaxed(v, fd->base);
+}
+
+static int ti_fapll_is_enabled(struct clk_hw *hw)
+{
+	struct fapll_data *fd = to_fapll(hw);
+	u32 v = readl_relaxed(fd->base);
+
+	return v & FAPLL_MAIN_PLLEN;
+}
+
+static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
+					  unsigned long parent_rate)
+{
+	struct fapll_data *fd = to_fapll(hw);
+	u32 fapll_n, fapll_p, v;
+	long long rate;
+
+	if (ti_fapll_clock_is_bypass(fd))
+		return parent_rate;
+
+	rate = parent_rate;
+
+	/* PLL pre-divider is P and multiplier is N */
+	v = readl_relaxed(fd->base);
+	fapll_p = (v >> 8) & 0xff;
+	if (fapll_p)
+		do_div(rate, fapll_p);
+	fapll_n = v >> 16;
+	if (fapll_n)
+		rate *= fapll_n;
+
+	return rate;
+}
+
+static u8 ti_fapll_get_parent(struct clk_hw *hw)
+{
+	struct fapll_data *fd = to_fapll(hw);
+
+	if (ti_fapll_clock_is_bypass(fd))
+		return 1;
+
+	return 0;
+}
+
+static struct clk_ops ti_fapll_ops = {
+	.enable = ti_fapll_enable,
+	.disable = ti_fapll_disable,
+	.is_enabled = ti_fapll_is_enabled,
+	.recalc_rate = ti_fapll_recalc_rate,
+	.get_parent = ti_fapll_get_parent,
+};
+
+static int ti_fapll_synth_enable(struct clk_hw *hw)
+{
+	struct fapll_synth *synth = to_synth(hw);
+	u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+	v &= ~(1 << synth->index);
+	writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
+
+	return 0;
+}
+
+static void ti_fapll_synth_disable(struct clk_hw *hw)
+{
+	struct fapll_synth *synth = to_synth(hw);
+	u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+	v |= 1 << synth->index;
+	writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
+}
+
+static int ti_fapll_synth_is_enabled(struct clk_hw *hw)
+{
+	struct fapll_synth *synth = to_synth(hw);
+	u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+	return !(v & (1 << synth->index));
+}
+
+/*
+ * See dm816x TRM chapter 1.10.3 Flying Adder PLL for more info
+ */
+static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	struct fapll_synth *synth = to_synth(hw);
+	u32 synth_div_m;
+	long long rate;
+
+	/* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
+	if (!synth->div)
+		return 32768;
+
+	/*
+	 * PLL in bypass sets the synths in bypass mode too. The PLL rate
+	 * can also be set to 27MHz, so we can't use parent_rate to
+	 * check for bypass mode.
+	 */
+	if (ti_fapll_clock_is_bypass(synth->fd))
+		return parent_rate;
+
+	rate = parent_rate;
+
+	/*
+	 * Synth frequency integer and fractional divider.
+	 * Note that the phase output K is 8, so the result needs
+	 * to be multiplied by 8.
+	 */
+	if (synth->freq) {
+		u32 v, synth_int_div, synth_frac_div, synth_div_freq;
+
+		v = readl_relaxed(synth->freq);
+		synth_int_div = (v >> 24) & 0xf;
+		synth_frac_div = v & 0xffffff;
+		synth_div_freq = (synth_int_div * 10000000) + synth_frac_div;
+		rate *= 10000000;
+		do_div(rate, synth_div_freq);
+		rate *= 8;
+	}
+
+	/* Synth post-divider M */
+	synth_div_m = readl_relaxed(synth->div) & 0xff;
+	do_div(rate, synth_div_m);
+
+	return rate;
+}
+
+static struct clk_ops ti_fapll_synt_ops = {
+	.enable = ti_fapll_synth_enable,
+	.disable = ti_fapll_synth_disable,
+	.is_enabled = ti_fapll_synth_is_enabled,
+	.recalc_rate = ti_fapll_synth_recalc_rate,
+};
+
+static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
+						void __iomem *freq,
+						void __iomem *div,
+						int index,
+						const char *name,
+						const char *parent,
+						struct clk *pll_clk)
+{
+	struct clk_init_data *init;
+	struct fapll_synth *synth;
+
+	init = kzalloc(sizeof(*init), GFP_KERNEL);
+	if (!init)
+		return ERR_PTR(-ENOMEM);
+
+	init->ops = &ti_fapll_synt_ops;
+	init->name = name;
+	init->parent_names = &parent;
+	init->num_parents = 1;
+
+	synth = kzalloc(sizeof(*synth), GFP_KERNEL);
+	if (!synth)
+		goto free;
+
+	synth->fd = fd;
+	synth->index = index;
+	synth->freq = freq;
+	synth->div = div;
+	synth->name = name;
+	synth->hw.init = init;
+	synth->clk_pll = pll_clk;
+
+	return clk_register(NULL, &synth->hw);
+
+free:
+	kfree(synth);
+	kfree(init);
+
+	return ERR_PTR(-ENOMEM);
+}
+
+static void __init ti_fapll_setup(struct device_node *node)
+{
+	struct fapll_data *fd;
+	struct clk_init_data *init = NULL;
+	const char *parent_name[2];
+	struct clk *pll_clk;
+	int i;
+
+	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+	if (!fd)
+		return;
+
+	fd->outputs.clks = kzalloc(sizeof(struct clk *) *
+				   (MAX_FAPLL_OUTPUTS + 1),
+				   GFP_KERNEL);
+	if (!fd->outputs.clks)
+		goto free;
+
+	init = kzalloc(sizeof(*init), GFP_KERNEL);
+	if (!init)
+		goto free;
+
+	init->ops = &ti_fapll_ops;
+	init->name = node->name;
+
+	init->num_parents = of_clk_get_parent_count(node);
+	if (init->num_parents != 2) {
+		pr_err("%s must have two parents\n", node->name);
+		goto free;
+	}
+
+	parent_name[0] = of_clk_get_parent_name(node, 0);
+	parent_name[1] = of_clk_get_parent_name(node, 1);
+	init->parent_names = parent_name;
+
+	fd->clk_ref = of_clk_get(node, 0);
+	if (IS_ERR(fd->clk_ref)) {
+		pr_err("%s could not get clk_ref\n", node->name);
+		goto free;
+	}
+
+	fd->clk_bypass = of_clk_get(node, 1);
+	if (IS_ERR(fd->clk_bypass)) {
+		pr_err("%s could not get clk_bypass\n", node->name);
+		goto free;
+	}
+
+	fd->base = of_iomap(node, 0);
+	if (!fd->base) {
+		pr_err("%s could not get IO base\n", node->name);
+		goto free;
+	}
+
+	if (fapll_is_ddr_pll(fd->base))
+		fd->bypass_bit_inverted = true;
+
+	fd->name = node->name;
+	fd->hw.init = init;
+
+	/* Register the parent PLL */
+	pll_clk = clk_register(NULL, &fd->hw);
+	if (IS_ERR(pll_clk))
+		goto unmap;
+
+	fd->outputs.clks[0] = pll_clk;
+	fd->outputs.clk_num++;
+
+	/*
+	 * Set up the child synthesizers starting at index 1 as the
+	 * PLL output is at index 0. We need to check the clock-indices
+	 * for numbering in case there are holes in the synth mapping,
+	 * and then probe the synth register to see if it has a FREQ
+	 * register available.
+	 */
+	for (i = 0; i < MAX_FAPLL_OUTPUTS; i++) {
+		const char *output_name;
+		void __iomem *freq, *div;
+		struct clk *synth_clk;
+		int output_instance;
+		u32 v;
+
+		if (of_property_read_string_index(node, "clock-output-names",
+						  i, &output_name))
+			continue;
+
+		if (of_property_read_u32_index(node, "clock-indices", i,
+					       &output_instance))
+			output_instance = i;
+
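+		/* Each output has an 8-byte FREQ/DIV register pair */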
+		freq = fd->base + (output_instance * 8);
+		div = freq + 4;
+
+		/* Check for hardwired audio_pll_clk1 */
+		if (is_audio_pll_clk1(freq)) {
+			freq = 0;
+			div = 0;
+		} else {
+			/* Does the synthesizer have a FREQ register? */
+			v = readl_relaxed(freq);
+			if (!v)
+				freq = 0;
+		}
+		synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance,
+						 output_name, node->name,
+						 pll_clk);
+		if (IS_ERR(synth_clk))
+			continue;
+
+		fd->outputs.clks[output_instance] = synth_clk;
+		fd->outputs.clk_num++;
+
+		clk_register_clkdev(synth_clk, output_name, NULL);
+	}
+
+	/* Register the child synthesizers as the FAPLL outputs */
+	of_clk_add_provider(node, of_clk_src_onecell_get, &fd->outputs);
+	/* Add clock alias for the outputs */
+
+	kfree(init);
+
+	return;
+
+unmap:
+	iounmap(fd->base);
+free:
+	if (fd->clk_bypass)
+		clk_put(fd->clk_bypass);
+	if (fd->clk_ref)
+		clk_put(fd->clk_ref);
+	kfree(fd->outputs.clks);
+	kfree(fd);
+	kfree(init);
+}
+
+CLK_OF_DECLARE(ti_fapll_clock, "ti,dm816-fapll-clock", ti_fapll_setup);
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index b326d27..d493307 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
 #undef pr_fmt
@@ -90,63 +92,164 @@
 	return ret;
 }
 
+static struct clk *_register_gate(struct device *dev, const char *name,
+				  const char *parent_name, unsigned long flags,
+				  void __iomem *reg, u8 bit_idx,
+				  u8 clk_gate_flags, const struct clk_ops *ops,
+				  const struct clk_hw_omap_ops *hw_ops)
+{
+	struct clk_init_data init = { NULL };
+	struct clk_hw_omap *clk_hw;
+	struct clk *clk;
+
+	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+	if (!clk_hw)
+		return ERR_PTR(-ENOMEM);
+
+	clk_hw->hw.init = &init;
+
+	init.name = name;
+	init.ops = ops;
+
+	clk_hw->enable_reg = reg;
+	clk_hw->enable_bit = bit_idx;
+	clk_hw->ops = hw_ops;
+
+	clk_hw->flags = MEMMAP_ADDRESSING | clk_gate_flags;
+
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	init.flags = flags;
+
+	clk = clk_register(NULL, &clk_hw->hw);
+
+	if (IS_ERR(clk))
+		kfree(clk_hw);
+
+	return clk;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_gate(struct ti_clk *setup)
+{
+	const struct clk_ops *ops = &omap_gate_clk_ops;
+	const struct clk_hw_omap_ops *hw_ops = NULL;
+	u32 reg;
+	struct clk_omap_reg *reg_setup;
+	u32 flags = 0;
+	u8 clk_gate_flags = 0;
+	struct ti_clk_gate *gate;
+
+	gate = setup->data;
+
+	if (gate->flags & CLKF_INTERFACE)
+		return ti_clk_register_interface(setup);
+
+	reg_setup = (struct clk_omap_reg *)&reg;
+
+	if (gate->flags & CLKF_SET_RATE_PARENT)
+		flags |= CLK_SET_RATE_PARENT;
+
+	if (gate->flags & CLKF_SET_BIT_TO_DISABLE)
+		clk_gate_flags |= INVERT_ENABLE;
+
+	if (gate->flags & CLKF_HSDIV) {
+		ops = &omap_gate_clk_hsdiv_restore_ops;
+		hw_ops = &clkhwops_wait;
+	}
+
+	if (gate->flags & CLKF_DSS)
+		hw_ops = &clkhwops_omap3430es2_dss_usbhost_wait;
+
+	if (gate->flags & CLKF_WAIT)
+		hw_ops = &clkhwops_wait;
+
+	if (gate->flags & CLKF_CLKDM)
+		ops = &omap_gate_clkdm_clk_ops;
+
+	if (gate->flags & CLKF_AM35XX)
+		/* Only handle interrupts belonging to the pdma driver */
+
+	reg_setup->index = gate->module;
+	reg_setup->offset = gate->reg;
+
+	return _register_gate(NULL, setup->name, gate->parent, flags,
+			      (void __iomem *)reg, gate->bit_shift,
+			      clk_gate_flags, ops, hw_ops);
+}
+
+struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup)
+{
+	struct clk_hw_omap *gate;
+	struct clk_omap_reg *reg;
+	const struct clk_hw_omap_ops *ops = &clkhwops_wait;
+
+	if (!setup)
+		return NULL;
+
+	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+	if (!gate)
+		return ERR_PTR(-ENOMEM);
+
+	reg = (struct clk_omap_reg *)&gate->enable_reg;
+	reg->index = setup->module;
+	reg->offset = setup->reg;
+
+	gate->enable_bit = setup->bit_shift;
+
+	if (setup->flags & CLKF_NO_WAIT)
+		ops = NULL;
+
+	if (setup->flags & CLKF_INTERFACE)
+		ops = &clkhwops_iclk_wait;
+
+	gate->ops = ops;
+	gate->flags = MEMMAP_ADDRESSING;
+
+	return &gate->hw;
+}
+#endif
+
 static void __init _of_ti_gate_clk_setup(struct device_node *node,
 					 const struct clk_ops *ops,
 					 const struct clk_hw_omap_ops *hw_ops)
 {
 	struct clk *clk;
-	struct clk_init_data init = { NULL };
-	struct clk_hw_omap *clk_hw;
-	const char *clk_name = node->name;
 	const char *parent_name;
+	void __iomem *reg = NULL;
+	u8 enable_bit = 0;
 	u32 val;
-
-	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
-	if (!clk_hw)
-		return;
-
-	clk_hw->hw.init = &init;
-
-	init.name = clk_name;
-	init.ops = ops;
+	u32 flags = 0;
+	u8 clk_gate_flags = 0;
 
 	if (ops != &omap_gate_clkdm_clk_ops) {
-		clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
-		if (!clk_hw->enable_reg)
-			goto cleanup;
+		reg = ti_clk_get_reg_addr(node, 0);
+		if (!reg)
+			return;
 
 		if (!of_property_read_u32(node, "ti,bit-shift", &val))
-			clk_hw->enable_bit = val;
+			enable_bit = val;
 	}
 
-	clk_hw->ops = hw_ops;
-
-	clk_hw->flags = MEMMAP_ADDRESSING;
-
 	if (of_clk_get_parent_count(node) != 1) {
-		pr_err("%s must have 1 parent\n", clk_name);
-		goto cleanup;
+		pr_err("%s must have 1 parent\n", node->name);
+		return;
 	}
 
 	parent_name = of_clk_get_parent_name(node, 0);
-	init.parent_names = &parent_name;
-	init.num_parents = 1;
 
 	if (of_property_read_bool(node, "ti,set-rate-parent"))
-		init.flags |= CLK_SET_RATE_PARENT;
+		flags |= CLK_SET_RATE_PARENT;
 
 	if (of_property_read_bool(node, "ti,set-bit-to-disable"))
-		clk_hw->flags |= INVERT_ENABLE;
+		clk_gate_flags |= INVERT_ENABLE;
 
-	clk = clk_register(NULL, &clk_hw->hw);
+	clk = _register_gate(NULL, node->name, parent_name, flags, reg,
+			     enable_bit, clk_gate_flags, ops, hw_ops);
 
-	if (!IS_ERR(clk)) {
+	if (!IS_ERR(clk))
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
-		return;
-	}
-
-cleanup:
-	kfree(clk_hw);
 }
 
 static void __init
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 9c3e8c4..265d91f 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -31,53 +32,102 @@
 	.is_enabled	= &omap2_dflt_clk_is_enabled,
 };
 
-static void __init _of_ti_interface_clk_setup(struct device_node *node,
-					      const struct clk_hw_omap_ops *ops)
+static struct clk *_register_interface(struct device *dev, const char *name,
+				       const char *parent_name,
+				       void __iomem *reg, u8 bit_idx,
+				       const struct clk_hw_omap_ops *ops)
 {
-	struct clk *clk;
 	struct clk_init_data init = { NULL };
 	struct clk_hw_omap *clk_hw;
-	const char *parent_name;
-	u32 val;
+	struct clk *clk;
 
 	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
 	if (!clk_hw)
-		return;
+		return ERR_PTR(-ENOMEM);
 
 	clk_hw->hw.init = &init;
 	clk_hw->ops = ops;
 	clk_hw->flags = MEMMAP_ADDRESSING;
+	clk_hw->enable_reg = reg;
+	clk_hw->enable_bit = bit_idx;
 
-	clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
-	if (!clk_hw->enable_reg)
-		goto cleanup;
-
-	if (!of_property_read_u32(node, "ti,bit-shift", &val))
-		clk_hw->enable_bit = val;
-
-	init.name = node->name;
+	init.name = name;
 	init.ops = &ti_interface_clk_ops;
 	init.flags = 0;
 
-	parent_name = of_clk_get_parent_name(node, 0);
-	if (!parent_name) {
-		pr_err("%s must have a parent\n", node->name);
-		goto cleanup;
-	}
-
 	init.num_parents = 1;
 	init.parent_names = &parent_name;
 
 	clk = clk_register(NULL, &clk_hw->hw);
 
-	if (!IS_ERR(clk)) {
-		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+	if (IS_ERR(clk))
+		kfree(clk_hw);
+	else
 		omap2_init_clk_hw_omap_clocks(clk);
+
+	return clk;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_interface(struct ti_clk *setup)
+{
+	const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait;
+	u32 reg;
+	struct clk_omap_reg *reg_setup;
+	struct ti_clk_gate *gate;
+
+	gate = setup->data;
+	reg_setup = (struct clk_omap_reg *)&reg;
+	reg_setup->index = gate->module;
+	reg_setup->offset = gate->reg;
+
+	if (gate->flags & CLKF_NO_WAIT)
+		ops = &clkhwops_iclk;
+
+	if (gate->flags & CLKF_HSOTGUSB)
+		ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait;
+
+	if (gate->flags & CLKF_DSS)
+		ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait;
+
+	if (gate->flags & CLKF_SSI)
+		ops = &clkhwops_omap3430es2_iclk_ssi_wait;
+
+	if (gate->flags & CLKF_AM35XX)
+		ops = &clkhwops_am35xx_ipss_wait;
+
+	return _register_interface(NULL, setup->name, gate->parent,
+				   (void __iomem *)reg, gate->bit_shift, ops);
+}
+#endif
+
+static void __init _of_ti_interface_clk_setup(struct device_node *node,
+					      const struct clk_hw_omap_ops *ops)
+{
+	struct clk *clk;
+	const char *parent_name;
+	void __iomem *reg;
+	u8 enable_bit = 0;
+	u32 val;
+
+	reg = ti_clk_get_reg_addr(node, 0);
+	if (!reg)
+		return;
+
+	if (!of_property_read_u32(node, "ti,bit-shift", &val))
+		enable_bit = val;
+
+	parent_name = of_clk_get_parent_name(node, 0);
+	if (!parent_name) {
+		pr_err("%s must have a parent\n", node->name);
 		return;
 	}
 
-cleanup:
-	kfree(clk_hw);
+	clk = _register_interface(NULL, node->name, parent_name, reg,
+				  enable_bit, ops);
+
+	if (!IS_ERR(clk))
+		of_clk_add_provider(node, of_clk_src_simple_get, clk);
 }
 
 static void __init of_ti_interface_clk_setup(struct device_node *node)
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index e9d650e..728e253 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -144,6 +145,39 @@
 	return clk;
 }
 
+struct clk *ti_clk_register_mux(struct ti_clk *setup)
+{
+	struct ti_clk_mux *mux;
+	u32 flags;
+	u8 mux_flags = 0;
+	struct clk_omap_reg *reg_setup;
+	u32 reg;
+	u32 mask;
+
+	reg_setup = (struct clk_omap_reg *)&reg;
+
+	mux = setup->data;
+	flags = CLK_SET_RATE_NO_REPARENT;
+
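+	/* Build a parent-select mask wide enough for the highest parent index */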
+	mask = mux->num_parents;
+	if (!(mux->flags & CLKF_INDEX_STARTS_AT_ONE))
+		mask--;
+
+	mask = (1 << fls(mask)) - 1;
+	reg_setup->index = mux->module;
+	reg_setup->offset = mux->reg;
+
+	if (mux->flags & CLKF_INDEX_STARTS_AT_ONE)
+		mux_flags |= CLK_MUX_INDEX_ONE;
+
+	if (mux->flags & CLKF_SET_RATE_PARENT)
+		flags |= CLK_SET_RATE_PARENT;
+
+	return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
+			     flags, (void __iomem *)reg, mux->bit_shift, mask,
+			     mux_flags, NULL, NULL);
+}
+
 /**
  * of_mux_clk_setup - Setup function for simple mux rate clock
  * @node: DT node for the clock
@@ -194,8 +228,9 @@
 
 	mask = (1 << fls(mask)) - 1;
 
-	clk = _register_mux(NULL, node->name, parent_names, num_parents, flags,
-			    reg, shift, mask, clk_mux_flags, NULL, NULL);
+	clk = _register_mux(NULL, node->name, parent_names, num_parents,
+			    flags, reg, shift, mask, clk_mux_flags, NULL,
+			    NULL);
 
 	if (!IS_ERR(clk))
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -205,6 +240,37 @@
 }
 CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup);
 
+struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
+{
+	struct clk_mux *mux;
+	struct clk_omap_reg *reg;
+	int num_parents;
+
+	if (!setup)
+		return NULL;
+
+	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+	if (!mux)
+		return ERR_PTR(-ENOMEM);
+
+	reg = (struct clk_omap_reg *)&mux->reg;
+
+	mux->shift = setup->bit_shift;
+
+	reg->index = setup->module;
+	reg->offset = setup->reg;
+
+	if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
+		mux->flags |= CLK_MUX_INDEX_ONE;
+
+	num_parents = setup->num_parents;
+
+	mux->mask = num_parents - 1;
+	mux->mask = (1 << fls(mux->mask)) - 1;
+
+	return &mux->hw;
+}
+
 static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
 {
 	struct clk_mux *mux;
diff --git a/drivers/clk/ux500/clk-prcc.c b/drivers/clk/ux500/clk-prcc.c
index bd4769a..0e95076 100644
--- a/drivers/clk/ux500/clk-prcc.c
+++ b/drivers/clk/ux500/clk-prcc.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clk-private.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/err.h>
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index e2d63bc..bf63c96 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clk-private.h>
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/slab.h>
 #include <linux/io.h>
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 9037beb..f870aad 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -303,6 +303,7 @@
 	clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
 			"cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
 			26, 0, &armclk_lock);
+	clk_prepare_enable(clks[cpu_2x]);
 
 	clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
 			4 + 2 * tmp);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 1c2506f..68161f7 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -63,6 +63,11 @@
 config CADENCE_TTC_TIMER
 	bool
 
+config ASM9260_TIMER
+	bool
+	select CLKSRC_MMIO
+	select CLKSRC_OF
+
 config CLKSRC_NOMADIK_MTU
 	bool
 	depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -245,15 +250,4 @@
 	help
 	  This enables OST0 support available on PXA and SA-11x0
 	  platforms.
-
-config ASM9260_TIMER
-	bool "Alphascale ASM9260 timer driver"
-	depends on GENERIC_CLOCKEVENTS
-	select CLKSRC_MMIO
-	select CLKSRC_OF
-	default y if MACH_ASM9260
-	help
-	  This enables build of a clocksource and clockevent driver for
-	  the 32-bit System Timer hardware available on a Alphascale ASM9260.
-
 endmenu
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 32a3d25..68ab423 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -224,6 +224,8 @@
 	}
 	rate = clk_get_rate(clk);
 
+	mtk_timer_global_reset(evt);
+
 	if (request_irq(evt->dev.irq, mtk_timer_interrupt,
 			IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
 		pr_warn("failed to setup irq %d\n", evt->dev.irq);
@@ -232,8 +234,6 @@
 
 	evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
 
-	mtk_timer_global_reset(evt);
-
 	/* Configure clock source */
 	mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
 	clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
@@ -241,10 +241,11 @@
 
 	/* Configure clock event */
 	mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
-	mtk_timer_enable_irq(evt, GPT_CLK_EVT);
-
 	clockevents_config_and_register(&evt->dev, rate, 0x3,
 					0xffffffff);
+
+	mtk_timer_enable_irq(evt, GPT_CLK_EVT);
+
 	return;
 
 err_clk_disable:
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 941f3f3..d9438af 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -163,7 +163,7 @@
 	.dev_id		= &ckevt_pxa_osmr0,
 };
 
-static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
 {
 	timer_writel(0, OIER);
 	timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index bba62f9..ec57ba2 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -225,12 +225,12 @@
 	clock_event_ddata.base = base;
 	clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
 
-	setup_irq(irq, &efm32_clock_event_irq);
-
 	clockevents_config_and_register(&clock_event_ddata.evtdev,
 					DIV_ROUND_CLOSEST(rate, 1024),
 					0xf, 0xffff);
 
+	setup_irq(irq, &efm32_clock_event_irq);
+
 	return 0;
 
 err_get_irq:
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 0226844..5dcbf90 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -178,10 +178,6 @@
 
 	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
 
-	ret = setup_irq(irq, &sun5i_timer_irq);
-	if (ret)
-		pr_warn("failed to setup irq %d\n", irq);
-
 	/* Enable timer0 interrupt */
 	val = readl(timer_base + TIMER_IRQ_EN_REG);
 	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
@@ -191,6 +187,10 @@
 
 	clockevents_config_and_register(&sun5i_clockevent, rate,
 					TIMER_SYNC_TICKS, 0xffffffff);
+
+	ret = setup_irq(irq, &sun5i_timer_irq);
+	if (ret)
+		pr_warn("failed to setup irq %d\n", irq);
 }
 CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
 		       sun5i_timer_init);
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 72564b7..7ea2441 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -26,7 +26,7 @@
 config PPC_CORENET_CPUFREQ
 	tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
 	depends on PPC_E500MC && OF && COMMON_CLK
-	select CLK_PPC_CORENET
+	select CLK_QORIQ
 	help
 	  This adds the CPUFreq driver support for Freescale e500mc,
 	  e5500 and e6500 series SoCs which are capable of changing
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 5e98c6b..82d2fbb 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -159,7 +159,7 @@
 
 static int exynos_cpufreq_probe(struct platform_device *pdev)
 {
-	struct device_node *cpus, *np;
+	struct device_node *cpu0;
 	int ret = -EINVAL;
 
 	exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
@@ -206,28 +206,19 @@
 	if (ret)
 		goto err_cpufreq_reg;
 
-	cpus = of_find_node_by_path("/cpus");
-	if (!cpus) {
-		pr_err("failed to find cpus node\n");
+	cpu0 = of_get_cpu_node(0, NULL);
+	if (!cpu0) {
+		pr_err("failed to find cpu0 node\n");
 		return 0;
 	}
 
-	np = of_get_next_child(cpus, NULL);
-	if (!np) {
-		pr_err("failed to find cpus child node\n");
-		of_node_put(cpus);
-		return 0;
-	}
-
-	if (of_find_property(np, "#cooling-cells", NULL)) {
-		cdev = of_cpufreq_cooling_register(np,
+	if (of_find_property(cpu0, "#cooling-cells", NULL)) {
+		cdev = of_cpufreq_cooling_register(cpu0,
 						   cpu_present_mask);
 		if (IS_ERR(cdev))
 			pr_err("running cpufreq without cooling device: %ld\n",
 			       PTR_ERR(cdev));
 	}
-	of_node_put(np);
-	of_node_put(cpus);
 
 	return 0;
 
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index bee5df7..7cb4b76 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -22,6 +22,8 @@
 #include <linux/smp.h>
 #include <sysdev/fsl_soc.h>
 
+#include <asm/smp.h>	/* for get_hard_smp_processor_id() in UP configs */
+
 /**
  * struct cpu_data - per CPU data struct
  * @parent: the parent node of cpu clock
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 2fd53ea..d6d4257 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -263,7 +263,7 @@
 }
 
 #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
 {
 	int count, v, i, found;
 	struct cpufreq_frequency_table *pos;
@@ -333,7 +333,7 @@
 	.notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
 };
 
-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
 	struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
 	struct cpufreq_frequency_table *pos;
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index d00f1ce..733aa51 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -144,11 +144,6 @@
 	(cfg->info->set_fvco)(cfg);
 }
 
-static inline void s3c_cpufreq_resume_clocks(void)
-{
-	cpu_cur.info->resume_clocks();
-}
-
 static inline void s3c_cpufreq_updateclk(struct clk *clk,
 					 unsigned int freq)
 {
@@ -417,9 +412,6 @@
 
 	last_target = ~0;	/* invalidate last_target setting */
 
-	/* first, find out what speed we resumed at. */
-	s3c_cpufreq_resume_clocks();
-
 	/* whilst we will be called later on, we try and re-set the
 	 * cpu frequencies as soon as possible so that we do not end
 	 * up resuming devices and then immediately having to re-set
@@ -454,7 +446,7 @@
 };
 
 
-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
 {
 	if (!info || !info->name) {
 		printk(KERN_ERR "%s: failed to pass valid information\n",
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index aedec09..5937207 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -13,6 +13,7 @@
 #include <linux/notifier.h>
 #include <linux/clockchips.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
@@ -158,70 +159,83 @@
 	struct device_node *power_mgt;
 	int nr_idle_states = 1; /* Snooze */
 	int dt_idle_states;
-	const __be32 *idle_state_flags;
-	const __be32 *idle_state_latency;
-	u32 len_flags, flags, latency_ns;
-	int i;
+	u32 *latency_ns, *residency_ns, *flags;
+	int i, rc;
 
 	/* Currently we have snooze statically defined */
 
 	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
 	if (!power_mgt) {
 		pr_warn("opal: PowerMgmt Node not found\n");
-		return nr_idle_states;
+		goto out;
 	}
 
-	idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags);
-	if (!idle_state_flags) {
-		pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
-		return nr_idle_states;
+	/* Count elements of one idle-state property to determine the number of states */
+	dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
+	if (dt_idle_states < 0) {
+		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+		goto out;
 	}
 
-	idle_state_latency = of_get_property(power_mgt,
-			"ibm,cpu-idle-state-latencies-ns", NULL);
-	if (!idle_state_latency) {
-		pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n");
-		return nr_idle_states;
+	flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+	if (of_property_read_u32_array(power_mgt,
+			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
+		goto out_free_flags;
 	}
 
-	dt_idle_states = len_flags / sizeof(u32);
+	latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
+	rc = of_property_read_u32_array(power_mgt,
+		"ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
+	if (rc) {
+		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+		goto out_free_latency;
+	}
+
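+	/* Residency is optional; the return code is checked later so defaults apply */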
+	residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
+	rc = of_property_read_u32_array(power_mgt,
+		"ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
 
 	for (i = 0; i < dt_idle_states; i++) {
 
-		flags = be32_to_cpu(idle_state_flags[i]);
-
-		/* Cpuidle accepts exit_latency in us and we estimate
-		 * target residency to be 10x exit_latency
+		/*
+		 * Cpuidle accepts exit_latency and target_residency in us.
+		 * Use default target_residency values if f/w does not expose them.
 		 */
-		latency_ns = be32_to_cpu(idle_state_latency[i]);
-		if (flags & OPAL_PM_NAP_ENABLED) {
+		if (flags[i] & OPAL_PM_NAP_ENABLED) {
 			/* Add NAP state */
 			strcpy(powernv_states[nr_idle_states].name, "Nap");
 			strcpy(powernv_states[nr_idle_states].desc, "Nap");
 			powernv_states[nr_idle_states].flags = 0;
-			powernv_states[nr_idle_states].exit_latency =
-					((unsigned int)latency_ns) / 1000;
-			powernv_states[nr_idle_states].target_residency =
-					((unsigned int)latency_ns / 100);
+			powernv_states[nr_idle_states].target_residency = 100;
 			powernv_states[nr_idle_states].enter = &nap_loop;
-			nr_idle_states++;
-		}
-
-		if (flags & OPAL_PM_SLEEP_ENABLED ||
-			flags & OPAL_PM_SLEEP_ENABLED_ER1) {
+		} else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+			flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
 			/* Add FASTSLEEP state */
 			strcpy(powernv_states[nr_idle_states].name, "FastSleep");
 			strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
 			powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
-			powernv_states[nr_idle_states].exit_latency =
-					((unsigned int)latency_ns) / 1000;
-			powernv_states[nr_idle_states].target_residency =
-					((unsigned int)latency_ns / 100);
+			powernv_states[nr_idle_states].target_residency = 300000;
 			powernv_states[nr_idle_states].enter = &fastsleep_loop;
-			nr_idle_states++;
 		}
+
+		powernv_states[nr_idle_states].exit_latency =
+				((unsigned int)latency_ns[i]) / 1000;
+
+		if (!rc) {
+			powernv_states[nr_idle_states].target_residency =
+				((unsigned int)residency_ns[i]) / 1000;
+		}
+
+		nr_idle_states++;
 	}
 
+	kfree(residency_ns);
+out_free_latency:
+	kfree(latency_ns);
+out_free_flags:
+	kfree(flags);
+out:
 	return nr_idle_states;
 }
 
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 4d53458..080bd2d 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -44,6 +44,12 @@
 	off = 1;
 }
 
+bool cpuidle_not_available(struct cpuidle_driver *drv,
+			   struct cpuidle_device *dev)
+{
+	return off || !initialized || !drv || !dev || !dev->enabled;
+}
+
 /**
  * cpuidle_play_dead - cpu off-lining
  *
@@ -66,14 +72,8 @@
 	return -ENODEV;
 }
 
-/**
- * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
- * @drv: cpuidle driver for the given CPU.
- * @dev: cpuidle device for the given CPU.
- * @freeze: Whether or not the state should be suitable for suspend-to-idle.
- */
-static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-				      struct cpuidle_device *dev, bool freeze)
+static int find_deepest_state(struct cpuidle_driver *drv,
+			      struct cpuidle_device *dev, bool freeze)
 {
 	unsigned int latency_req = 0;
 	int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
@@ -92,6 +92,17 @@
 	return ret;
 }
 
+/**
+ * cpuidle_find_deepest_state - Find the deepest available idle state.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
+ */
+int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+			       struct cpuidle_device *dev)
+{
+	return find_deepest_state(drv, dev, false);
+}
+
 static void enter_freeze_proper(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev, int index)
 {
@@ -113,15 +124,14 @@
 
 /**
  * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
  *
  * If there are states with the ->enter_freeze callback, find the deepest of
- * them and enter it with frozen tick.  Otherwise, find the deepest state
- * available and enter it normally.
+ * them and enter it with frozen tick.
  */
-void cpuidle_enter_freeze(void)
+int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
-	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int index;
 
 	/*
@@ -129,24 +139,11 @@
 	 * that interrupts won't be enabled when it exits and allows the tick to
 	 * be frozen safely.
 	 */
-	index = cpuidle_find_deepest_state(drv, dev, true);
-	if (index >= 0) {
-		enter_freeze_proper(drv, dev, index);
-		return;
-	}
-
-	/*
-	 * It is not safe to freeze the tick, find the deepest state available
-	 * at all and try to enter it normally.
-	 */
-	index = cpuidle_find_deepest_state(drv, dev, false);
+	index = find_deepest_state(drv, dev, true);
 	if (index >= 0)
-		cpuidle_enter(drv, dev, index);
-	else
-		arch_cpu_idle();
+		enter_freeze_proper(drv, dev, index);
 
-	/* Interrupts are enabled again here. */
-	local_irq_disable();
+	return index;
 }
 
 /**
@@ -205,12 +202,6 @@
  */
 int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-	if (off || !initialized)
-		return -ENODEV;
-
-	if (!drv || !dev || !dev->enabled)
-		return -EBUSY;
-
 	return cpuidle_curr_governor->select(drv, dev);
 }
 
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index e554111..50ef8bd 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -159,6 +159,9 @@
 	if (WARN_ON(timeout < 0))
 		return -EINVAL;
 
+	if (timeout == 0)
+		return fence_is_signaled(fence);
+
 	trace_fence_wait_start(fence);
 	ret = fence->ops->wait(fence, intr, timeout);
 	trace_fence_wait_end(fence);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 3c97c8f..39920d7 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -327,6 +327,9 @@
 	unsigned seq, shared_count, i = 0;
 	long ret = timeout;
 
+	if (!timeout)
+		return reservation_object_test_signaled_rcu(obj, wait_all);
+
 retry:
 	fence = NULL;
 	shared_count = 0;
@@ -402,8 +405,6 @@
 	int ret = 1;
 
 	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-		int ret;
-
 		fence = fence_get_rcu(lfence);
 		if (!fence)
 			return -1;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 09e2825..d9891d3 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -664,7 +664,6 @@
 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
 	unsigned int		periods = buf_len / period_len;
 	int			i;
-	u32			cfg;
 
 	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
 		__func__, &buf_addr, buf_len, period_len,
@@ -700,17 +699,17 @@
 		if (direction == DMA_DEV_TO_MEM) {
 			desc->lld.mbr_sa = atchan->per_src_addr;
 			desc->lld.mbr_da = buf_addr + i * period_len;
-			cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 		} else {
 			desc->lld.mbr_sa = buf_addr + i * period_len;
 			desc->lld.mbr_da = atchan->per_dst_addr;
-			cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
 		}
 		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
 			| AT_XDMAC_MBR_UBC_NDEN
 			| AT_XDMAC_MBR_UBC_NSEN
 			| AT_XDMAC_MBR_UBC_NDE
-			| period_len >> at_xdmac_get_dwidth(cfg);
+			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 
 		dev_dbg(chan2dev(chan),
 			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 455b7a4..a8ad052 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -626,7 +626,7 @@
 	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
 
 	/* Check if we have any interrupt from the DMAC */
-	if (!status)
+	if (!status || !dw->in_use)
 		return IRQ_NONE;
 
 	/*
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 77a6dcf..194ec20 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -230,6 +230,10 @@
 	switch (pdev->device) {
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
 	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
 		return true;
 	default:
 		return false;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 8926f27..eb41004 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -219,6 +219,9 @@
 
 	while (dint) {
 		i = __ffs(dint);
+		/* only handle interrupts belonging to pdma driver*/
+		if (i >= pdev->dma_channels)
+			break;
 		dint &= (dint - 1);
 		phy = &pdev->phy[i];
 		ret = mmp_pdma_chan_handler(irq, phy);
@@ -999,6 +1002,9 @@
 	struct resource *iores;
 	int i, ret, irq = 0;
 	int dma_channels = 0, irq_num = 0;
+	const enum dma_slave_buswidth widths =
+		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
+		DMA_SLAVE_BUSWIDTH_4_BYTES;
 
 	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
 	if (!pdev)
@@ -1066,6 +1072,10 @@
 	pdev->device.device_config = mmp_pdma_config;
 	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
 	pdev->device.copy_align = PDMA_ALIGNMENT;
+	pdev->device.src_addr_widths = widths;
+	pdev->device.dst_addr_widths = widths;
+	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 
 	if (pdev->dev->coherent_dma_mask)
 		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 70c2fa9..b6f4e1f 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -110,7 +110,7 @@
 	struct tasklet_struct		tasklet;
 
 	struct mmp_tdma_desc		*desc_arr;
-	phys_addr_t			desc_arr_phys;
+	dma_addr_t			desc_arr_phys;
 	int				desc_num;
 	enum dma_transfer_direction	dir;
 	dma_addr_t			dev_addr;
@@ -166,9 +166,12 @@
 static int mmp_tdma_disable_chan(struct dma_chan *chan)
 {
 	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+	u32 tdcr;
 
-	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
-					tdmac->reg_base + TDCR);
+	tdcr = readl(tdmac->reg_base + TDCR);
+	tdcr |= TDCR_ABR;
+	tdcr &= ~TDCR_CHANEN;
+	writel(tdcr, tdmac->reg_base + TDCR);
 
 	tdmac->status = DMA_COMPLETE;
 
@@ -296,12 +299,27 @@
 	return -EAGAIN;
 }
 
+static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
+{
+	size_t reg;
+
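+	/* Position = current DMA address minus the buffer start address */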
+	if (tdmac->idx == 0) {
+		reg = __raw_readl(tdmac->reg_base + TDSAR);
+		reg -= tdmac->desc_arr[0].src_addr;
+	} else if (tdmac->idx == 1) {
+		reg = __raw_readl(tdmac->reg_base + TDDAR);
+		reg -= tdmac->desc_arr[0].dst_addr;
+	} else
+		return -EINVAL;
+
+	return reg;
+}
+
 static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
 {
 	struct mmp_tdma_chan *tdmac = dev_id;
 
 	if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
-		tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
 		tasklet_schedule(&tdmac->tasklet);
 		return IRQ_HANDLED;
 	} else
@@ -343,7 +361,7 @@
 	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
 
 	gpool = tdmac->pool;
-	if (tdmac->desc_arr)
+	if (gpool && tdmac->desc_arr)
 		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
 				size);
 	tdmac->desc_arr = NULL;
@@ -499,6 +517,7 @@
 {
 	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
 
+	tdmac->pos = mmp_tdma_get_pos(tdmac);
 	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
 			 tdmac->buf_len - tdmac->pos);
 
@@ -610,7 +629,7 @@
 	int i, ret;
 	int irq = 0, irq_num = 0;
 	int chan_num = TDMA_CHANNEL_NUM;
-	struct gen_pool *pool;
+	struct gen_pool *pool = NULL;
 
 	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
 	if (of_id)
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index d7a33b3..9c914d6 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -162,9 +162,9 @@
 	[BAM_P_IRQ_STTS]	= { 0x1010, 0x1000, 0x00, 0x00 },
 	[BAM_P_IRQ_CLR]		= { 0x1014, 0x1000, 0x00, 0x00 },
 	[BAM_P_IRQ_EN]		= { 0x1018, 0x1000, 0x00, 0x00 },
-	[BAM_P_EVNT_DEST_ADDR]	= { 0x102C, 0x00, 0x1000, 0x00 },
-	[BAM_P_EVNT_REG]	= { 0x1018, 0x00, 0x1000, 0x00 },
-	[BAM_P_SW_OFSTS]	= { 0x1000, 0x00, 0x1000, 0x00 },
+	[BAM_P_EVNT_DEST_ADDR]	= { 0x182C, 0x00, 0x1000, 0x00 },
+	[BAM_P_EVNT_REG]	= { 0x1818, 0x00, 0x1000, 0x00 },
+	[BAM_P_SW_OFSTS]	= { 0x1800, 0x00, 0x1000, 0x00 },
 	[BAM_P_DATA_FIFO_ADDR]	= { 0x1824, 0x00, 0x1000, 0x00 },
 	[BAM_P_DESC_FIFO_ADDR]	= { 0x181C, 0x00, 0x1000, 0x00 },
 	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1828, 0x00, 0x1000, 0x00 },
@@ -1143,6 +1143,10 @@
 	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
 
 	/* initialize dmaengine apis */
+	bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
 	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
 	bdev->common.device_free_chan_resources = bam_free_chan;
 	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index b2431aa..9f1d4c7 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -582,15 +582,12 @@
 	}
 }
 
-static void sh_dmae_shutdown(struct platform_device *pdev)
-{
-	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
-	sh_dmae_ctl_stop(shdev);
-}
-
 #ifdef CONFIG_PM
 static int sh_dmae_runtime_suspend(struct device *dev)
 {
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+	sh_dmae_ctl_stop(shdev);
 	return 0;
 }
 
@@ -605,6 +602,9 @@
 #ifdef CONFIG_PM_SLEEP
 static int sh_dmae_suspend(struct device *dev)
 {
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+	sh_dmae_ctl_stop(shdev);
 	return 0;
 }
 
@@ -929,13 +929,12 @@
 }
 
 static struct platform_driver sh_dmae_driver = {
-	.driver 	= {
+	.driver		= {
 		.pm	= &sh_dmae_pm,
 		.name	= SH_DMAE_DRV_NAME,
 		.of_match_table = sh_dmae_of_match,
 	},
 	.remove		= sh_dmae_remove,
-	.shutdown	= sh_dmae_shutdown,
 };
 
 static int __init sh_dmae_init(void)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index c5f7b4e..69fac06 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -78,7 +78,7 @@
  *	We have to be cautious here. We have seen BIOSes with DMI pointers
  *	pointing to completely the wrong place for example
  */
-static void dmi_table(u8 *buf, int len, int num,
+static void dmi_table(u8 *buf, u32 len, int num,
 		      void (*decode)(const struct dmi_header *, void *),
 		      void *private_data)
 {
@@ -93,12 +93,6 @@
 		const struct dmi_header *dm = (const struct dmi_header *)data;
 
 		/*
-		 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
-		 */
-		if (dm->type == DMI_ENTRY_END_OF_TABLE)
-			break;
-
-		/*
 		 *  We want to know the total length (formatted area and
 		 *  strings) before decoding to make sure we won't run off the
 		 *  table in dmi_decode or dmi_string
@@ -108,13 +102,20 @@
 			data++;
 		if (data - buf < len - 1)
 			decode(dm, private_data);
+
+		/*
+		 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
+		 */
+		if (dm->type == DMI_ENTRY_END_OF_TABLE)
+			break;
+
 		data += 2;
 		i++;
 	}
 }
 
 static phys_addr_t dmi_base;
-static u16 dmi_len;
+static u32 dmi_len;
 static u16 dmi_num;
 
 static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index af5d63c..f07d4a6 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -75,29 +75,25 @@
 	unsigned long key;
 	u32 desc_version;
 
-	*map_size = 0;
-	*desc_size = 0;
-	key = 0;
-	status = efi_call_early(get_memory_map, map_size, NULL,
-				&key, desc_size, &desc_version);
-	if (status != EFI_BUFFER_TOO_SMALL)
-		return EFI_LOAD_ERROR;
-
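+	/* Start with room for 32 descriptors and grow the buffer on retry */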
+	*map_size = sizeof(*m) * 32;
+again:
 	/*
 	 * Add an additional efi_memory_desc_t because we're doing an
 	 * allocation which may be in a new descriptor region.
 	 */
-	*map_size += *desc_size;
+	*map_size += sizeof(*m);
 	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
 				*map_size, (void **)&m);
 	if (status != EFI_SUCCESS)
 		goto fail;
 
+	*desc_size = 0;
+	key = 0;
 	status = efi_call_early(get_memory_map, map_size, m,
 				&key, desc_size, &desc_version);
 	if (status == EFI_BUFFER_TOO_SMALL) {
 		efi_call_early(free_pool, m);
-		return EFI_LOAD_ERROR;
+		goto again;
 	}
 
 	if (status != EFI_SUCCESS)
@@ -183,12 +179,12 @@
 		start = desc->phys_addr;
 		end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
 
-		if ((start + size) > end || (start + size) > max)
-			continue;
-
-		if (end - size > max)
+		if (end > max)
 			end = max;
 
+		if ((start + size) > end)
+			continue;
+
 		if (round_down(end - size, align) < start)
 			continue;
 
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 472fb5b..9cdbc0c 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -26,9 +26,12 @@
 	struct gpio_chip gpio_chip;
 };
 
+#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
+
 static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 	int val;
 
 	val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
@@ -42,7 +45,8 @@
 static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
 			      int value)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
 	if (value)
 		tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
@@ -55,7 +59,8 @@
 static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
 				int value)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
 	/* Set the initial value */
 	tps65912_gpio_set(gc, offset, value);
@@ -66,7 +71,8 @@
 
 static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
 {
-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
 
 	return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
 								GPIO_CFG_MASK);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8cad8e4..4650bf8 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -46,12 +46,13 @@
 
 	ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
 	if (ret < 0) {
-		/* We've found the gpio chip, but the translation failed.
-		 * Return true to stop looking and return the translation
-		 * error via out_gpio
+		/* We've found a gpio chip, but the translation failed.
+		 * Store translation error in out_gpio.
+		 * Return false to keep looking, as more than one gpio chip
+		 * could be registered per of-node.
 		 */
 		gg_data->out_gpio = ERR_PTR(ret);
-		return true;
+		return false;
 	 }
 
 	gg_data->out_gpio = gpiochip_get_desc(gc, ret);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b3589d0..910ff8a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -62,12 +62,18 @@
 	return KFD_MQD_TYPE_CP;
 }
 
-static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
+unsigned int get_first_pipe(struct device_queue_manager *dqm)
 {
-	BUG_ON(!dqm);
+	BUG_ON(!dqm || !dqm->dev);
 	return dqm->dev->shared_resources.first_compute_pipe;
 }
 
+unsigned int get_pipes_num(struct device_queue_manager *dqm)
+{
+	BUG_ON(!dqm || !dqm->dev);
+	return dqm->dev->shared_resources.compute_pipe_count;
+}
+
 static inline unsigned int get_pipes_num_cpsch(void)
 {
 	return PIPE_PER_ME_CP_SCHEDULING;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index d64f86c..488f51d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -163,6 +163,8 @@
 					struct qcm_process_device *qpd);
 int init_pipelines(struct device_queue_manager *dqm,
 		unsigned int pipes_num, unsigned int first_pipe);
+unsigned int get_first_pipe(struct device_queue_manager *dqm);
+unsigned int get_pipes_num(struct device_queue_manager *dqm);
 
 extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
@@ -175,10 +177,4 @@
 	return (pdd->lds_base >> 60) & 0x0E;
 }
 
-extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
-{
-	BUG_ON(!dqm || !dqm->dev);
-	return dqm->dev->shared_resources.compute_pipe_count;
-}
-
 #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 6b07246..5469efe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -131,5 +131,5 @@
 
 static int initialize_cpsch_cik(struct device_queue_manager *dqm)
 {
-	return init_pipelines(dqm, get_pipes_num(dqm), 0);
+	return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
 }
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 0409b90..b3e3068 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -153,7 +153,7 @@
 		     (adj->crtc_hdisplay - 1) |
 		     ((adj->crtc_vdisplay - 1) << 16));
 
-	cfg = ATMEL_HLCDC_CLKPOL;
+	cfg = 0;
 
 	prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
 	mode_rate = mode->crtc_clock * 1000;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 7320a6c..c1cb174 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -311,8 +311,6 @@
 
 	pm_runtime_enable(dev->dev);
 
-	pm_runtime_put_sync(dev->dev);
-
 	ret = atmel_hlcdc_dc_modeset_init(dev);
 	if (ret < 0) {
 		dev_err(dev->dev, "failed to initialize mode setting\n");
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
index 063d2a7..e79bd9b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c
@@ -311,7 +311,8 @@
 
 	/* Disable the layer */
 	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
-		     ATMEL_HLCDC_LAYER_RST);
+		     ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
+		     ATMEL_HLCDC_LAYER_UPDATE);
 
 	/* Clear all pending interrupts */
 	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6b00173..f6d04c7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -43,9 +43,10 @@
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
 
-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
-							struct drm_mode_fb_cmd2 *r,
-							struct drm_file *file_priv);
+static struct drm_framebuffer *
+internal_framebuffer_create(struct drm_device *dev,
+			    struct drm_mode_fb_cmd2 *r,
+			    struct drm_file *file_priv);
 
 /* Avoid boilerplate.  I'm tired of typing. */
 #define DRM_ENUM_NAME_FN(fnname, list)				\
@@ -2127,7 +2128,6 @@
 	DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
 
 	mutex_lock(&dev->mode_config.mutex);
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
 	connector = drm_connector_find(dev, out_resp->connector_id);
 	if (!connector) {
@@ -2157,6 +2157,8 @@
 	out_resp->mm_height = connector->display_info.height_mm;
 	out_resp->subpixel = connector->display_info.subpixel_order;
 	out_resp->connection = connector->status;
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 	encoder = drm_connector_get_encoder(connector);
 	if (encoder)
 		out_resp->encoder_id = encoder->base.id;
@@ -2907,13 +2909,11 @@
 	 */
 	if (req->flags & DRM_MODE_CURSOR_BO) {
 		if (req->handle) {
-			fb = add_framebuffer_internal(dev, &fbreq, file_priv);
+			fb = internal_framebuffer_create(dev, &fbreq, file_priv);
 			if (IS_ERR(fb)) {
 				DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
 				return PTR_ERR(fb);
 			}
-
-			drm_framebuffer_reference(fb);
 		} else {
 			fb = NULL;
 		}
@@ -3266,9 +3266,10 @@
 	return 0;
 }
 
-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
-							struct drm_mode_fb_cmd2 *r,
-							struct drm_file *file_priv)
+static struct drm_framebuffer *
+internal_framebuffer_create(struct drm_device *dev,
+			    struct drm_mode_fb_cmd2 *r,
+			    struct drm_file *file_priv)
 {
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_framebuffer *fb;
@@ -3300,12 +3301,6 @@
 		return fb;
 	}
 
-	mutex_lock(&file_priv->fbs_lock);
-	r->fb_id = fb->base.id;
-	list_add(&fb->filp_head, &file_priv->fbs);
-	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
-	mutex_unlock(&file_priv->fbs_lock);
-
 	return fb;
 }
 
@@ -3327,15 +3322,24 @@
 int drm_mode_addfb2(struct drm_device *dev,
 		    void *data, struct drm_file *file_priv)
 {
+	struct drm_mode_fb_cmd2 *r = data;
 	struct drm_framebuffer *fb;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
-	fb = add_framebuffer_internal(dev, data, file_priv);
+	fb = internal_framebuffer_create(dev, r, file_priv);
 	if (IS_ERR(fb))
 		return PTR_ERR(fb);
 
+	/* Transfer ownership to the filp for reaping on close */
+
+	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+	mutex_lock(&file_priv->fbs_lock);
+	r->fb_id = fb->base.id;
+	list_add(&fb->filp_head, &file_priv->fbs);
+	mutex_unlock(&file_priv->fbs_lock);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 9a5b687..379ab45 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -733,10 +733,14 @@
 			      struct drm_dp_sideband_msg_tx *txmsg)
 {
 	bool ret;
-	mutex_lock(&mgr->qlock);
+
+	/*
+	 * All updates to txmsg->state are protected by mgr->qlock, and the two
+	 * cases we check here are terminal states. For those the barriers
+	 * provided by the wake_up/wait_event pair are enough.
+	 */
 	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
 	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
-	mutex_unlock(&mgr->qlock);
 	return ret;
 }
 
@@ -1363,12 +1367,13 @@
 	return 0;
 }
 
-/* must be called holding qlock */
 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 {
 	struct drm_dp_sideband_msg_tx *txmsg;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&mgr->qlock));
+
 	/* construct a chunk from the first msg in the tx_msg queue */
 	if (list_empty(&mgr->tx_msg_downq)) {
 		mgr->tx_down_in_progress = false;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04a209e..1134526 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -91,29 +91,29 @@
  */
 
 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-						unsigned long size,
+						u64 size,
 						unsigned alignment,
 						unsigned long color,
 						enum drm_mm_search_flags flags);
 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-						unsigned long size,
+						u64 size,
 						unsigned alignment,
 						unsigned long color,
-						unsigned long start,
-						unsigned long end,
+						u64 start,
+						u64 end,
 						enum drm_mm_search_flags flags);
 
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
-				 unsigned long size, unsigned alignment,
+				 u64 size, unsigned alignment,
 				 unsigned long color,
 				 enum drm_mm_allocator_flags flags)
 {
 	struct drm_mm *mm = hole_node->mm;
-	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
-	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
-	unsigned long adj_start = hole_start;
-	unsigned long adj_end = hole_end;
+	u64 hole_start = drm_mm_hole_node_start(hole_node);
+	u64 hole_end = drm_mm_hole_node_end(hole_node);
+	u64 adj_start = hole_start;
+	u64 adj_end = hole_end;
 
 	BUG_ON(node->allocated);
 
@@ -124,12 +124,15 @@
 		adj_start = adj_end - size;
 
 	if (alignment) {
-		unsigned tmp = adj_start % alignment;
-		if (tmp) {
+		u64 tmp = adj_start;
+		unsigned rem;
+
+		rem = do_div(tmp, alignment);
+		if (rem) {
 			if (flags & DRM_MM_CREATE_TOP)
-				adj_start -= tmp;
+				adj_start -= rem;
 			else
-				adj_start += alignment - tmp;
+				adj_start += alignment - rem;
 		}
 	}
 
@@ -176,9 +179,9 @@
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
 	struct drm_mm_node *hole;
-	unsigned long end = node->start + node->size;
-	unsigned long hole_start;
-	unsigned long hole_end;
+	u64 end = node->start + node->size;
+	u64 hole_start;
+	u64 hole_end;
 
 	BUG_ON(node == NULL);
 
@@ -227,7 +230,7 @@
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
-			       unsigned long size, unsigned alignment,
+			       u64 size, unsigned alignment,
 			       unsigned long color,
 			       enum drm_mm_search_flags sflags,
 			       enum drm_mm_allocator_flags aflags)
@@ -246,16 +249,16 @@
 
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 				       struct drm_mm_node *node,
-				       unsigned long size, unsigned alignment,
+				       u64 size, unsigned alignment,
 				       unsigned long color,
-				       unsigned long start, unsigned long end,
+				       u64 start, u64 end,
 				       enum drm_mm_allocator_flags flags)
 {
 	struct drm_mm *mm = hole_node->mm;
-	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
-	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
-	unsigned long adj_start = hole_start;
-	unsigned long adj_end = hole_end;
+	u64 hole_start = drm_mm_hole_node_start(hole_node);
+	u64 hole_end = drm_mm_hole_node_end(hole_node);
+	u64 adj_start = hole_start;
+	u64 adj_end = hole_end;
 
 	BUG_ON(!hole_node->hole_follows || node->allocated);
 
@@ -271,12 +274,15 @@
 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
 	if (alignment) {
-		unsigned tmp = adj_start % alignment;
-		if (tmp) {
+		u64 tmp = adj_start;
+		unsigned rem;
+
+		rem = do_div(tmp, alignment);
+		if (rem) {
 			if (flags & DRM_MM_CREATE_TOP)
-				adj_start -= tmp;
+				adj_start -= rem;
 			else
-				adj_start += alignment - tmp;
+				adj_start += alignment - rem;
 		}
 	}
 
@@ -324,9 +330,9 @@
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
-					unsigned long size, unsigned alignment,
+					u64 size, unsigned alignment,
 					unsigned long color,
-					unsigned long start, unsigned long end,
+					u64 start, u64 end,
 					enum drm_mm_search_flags sflags,
 					enum drm_mm_allocator_flags aflags)
 {
@@ -387,32 +393,34 @@
 }
 EXPORT_SYMBOL(drm_mm_remove_node);
 
-static int check_free_hole(unsigned long start, unsigned long end,
-			   unsigned long size, unsigned alignment)
+static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
 {
 	if (end - start < size)
 		return 0;
 
 	if (alignment) {
-		unsigned tmp = start % alignment;
-		if (tmp)
-			start += alignment - tmp;
+		u64 tmp = start;
+		unsigned rem;
+
+		rem = do_div(tmp, alignment);
+		if (rem)
+			start += alignment - rem;
 	}
 
 	return end >= start + size;
 }
 
 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-						      unsigned long size,
+						      u64 size,
 						      unsigned alignment,
 						      unsigned long color,
 						      enum drm_mm_search_flags flags)
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
-	unsigned long adj_start;
-	unsigned long adj_end;
-	unsigned long best_size;
+	u64 adj_start;
+	u64 adj_end;
+	u64 best_size;
 
 	BUG_ON(mm->scanned_blocks);
 
@@ -421,7 +429,7 @@
 
 	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
 			       flags & DRM_MM_SEARCH_BELOW) {
-		unsigned long hole_size = adj_end - adj_start;
+		u64 hole_size = adj_end - adj_start;
 
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
@@ -445,18 +453,18 @@
 }
 
 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-							unsigned long size,
+							u64 size,
 							unsigned alignment,
 							unsigned long color,
-							unsigned long start,
-							unsigned long end,
+							u64 start,
+							u64 end,
 							enum drm_mm_search_flags flags)
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
-	unsigned long adj_start;
-	unsigned long adj_end;
-	unsigned long best_size;
+	u64 adj_start;
+	u64 adj_end;
+	u64 best_size;
 
 	BUG_ON(mm->scanned_blocks);
 
@@ -465,7 +473,7 @@
 
 	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
 			       flags & DRM_MM_SEARCH_BELOW) {
-		unsigned long hole_size = adj_end - adj_start;
+		u64 hole_size = adj_end - adj_start;
 
 		if (adj_start < start)
 			adj_start = start;
@@ -561,7 +569,7 @@
  * adding/removing nodes to/from the scan list are allowed.
  */
 void drm_mm_init_scan(struct drm_mm *mm,
-		      unsigned long size,
+		      u64 size,
 		      unsigned alignment,
 		      unsigned long color)
 {
@@ -594,11 +602,11 @@
  * adding/removing nodes to/from the scan list are allowed.
  */
 void drm_mm_init_scan_with_range(struct drm_mm *mm,
-				 unsigned long size,
+				 u64 size,
 				 unsigned alignment,
 				 unsigned long color,
-				 unsigned long start,
-				 unsigned long end)
+				 u64 start,
+				 u64 end)
 {
 	mm->scan_color = color;
 	mm->scan_alignment = alignment;
@@ -627,8 +635,8 @@
 {
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
-	unsigned long hole_start, hole_end;
-	unsigned long adj_start, adj_end;
+	u64 hole_start, hole_end;
+	u64 adj_start, adj_end;
 
 	mm->scanned_blocks++;
 
@@ -731,7 +739,7 @@
  *
  * Note that @mm must be cleared to 0 before calling this function.
  */
-void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
 {
 	INIT_LIST_HEAD(&mm->hole_stack);
 	mm->scanned_blocks = 0;
@@ -766,18 +774,17 @@
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
-static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
-				       const char *prefix)
+static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
+				     const char *prefix)
 {
-	unsigned long hole_start, hole_end, hole_size;
+	u64 hole_start, hole_end, hole_size;
 
 	if (entry->hole_follows) {
 		hole_start = drm_mm_hole_node_start(entry);
 		hole_end = drm_mm_hole_node_end(entry);
 		hole_size = hole_end - hole_start;
-		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
-			prefix, hole_start, hole_end,
-			hole_size);
+		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
+			 hole_end, hole_size);
 		return hole_size;
 	}
 
@@ -792,35 +799,34 @@
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
 	struct drm_mm_node *entry;
-	unsigned long total_used = 0, total_free = 0, total = 0;
+	u64 total_used = 0, total_free = 0, total = 0;
 
 	total_free += drm_mm_debug_hole(&mm->head_node, prefix);
 
 	drm_mm_for_each_node(entry, mm) {
-		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
-			prefix, entry->start, entry->start + entry->size,
-			entry->size);
+		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
+			 entry->start + entry->size, entry->size);
 		total_used += entry->size;
 		total_free += drm_mm_debug_hole(entry, prefix);
 	}
 	total = total_free + total_used;
 
-	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
-		total_used, total_free);
+	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
+		 total_used, total_free);
 }
 EXPORT_SYMBOL(drm_mm_debug_table);
 
 #if defined(CONFIG_DEBUG_FS)
-static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
+static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
 {
-	unsigned long hole_start, hole_end, hole_size;
+	u64 hole_start, hole_end, hole_size;
 
 	if (entry->hole_follows) {
 		hole_start = drm_mm_hole_node_start(entry);
 		hole_end = drm_mm_hole_node_end(entry);
 		hole_size = hole_end - hole_start;
-		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-				hole_start, hole_end, hole_size);
+		seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start,
+			   hole_end, hole_size);
 		return hole_size;
 	}
 
@@ -835,20 +841,20 @@
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
 	struct drm_mm_node *entry;
-	unsigned long total_used = 0, total_free = 0, total = 0;
+	u64 total_used = 0, total_free = 0, total = 0;
 
 	total_free += drm_mm_dump_hole(m, &mm->head_node);
 
 	drm_mm_for_each_node(entry, mm) {
-		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
-				entry->start, entry->start + entry->size,
-				entry->size);
+		seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start,
+			   entry->start + entry->size, entry->size);
 		total_used += entry->size;
 		total_free += drm_mm_dump_hole(m, entry);
 	}
 	total = total_free + total_used;
 
-	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
+	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
+		   total_used, total_free);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 96e811f..e8b18e5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -152,12 +152,12 @@
 			seq_puts(m, " (pp");
 		else
 			seq_puts(m, " (g");
-		seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
+		seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
 			   vma->node.start, vma->node.size,
 			   vma->ggtt_view.type);
 	}
 	if (obj->stolen)
-		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
+		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
 	if (obj->pin_mappable || obj->fault_mappable) {
 		char s[3], *t = s;
 		if (obj->pin_mappable)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8039cec..cc6ea53 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -622,7 +622,7 @@
 	return 0;
 }
 
-static int i915_drm_suspend_late(struct drm_device *drm_dev)
+static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 {
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
 	int ret;
@@ -636,7 +636,17 @@
 	}
 
 	pci_disable_device(drm_dev->pdev);
-	pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+	/*
+	 * During hibernation on some GEN4 platforms the BIOS may try to access
+	 * the device even though it's already in D3 and hang the machine. So
+	 * leave the device in D0 on those platforms and hope the BIOS will
+	 * power down the device properly. Platforms where this was seen:
+	 * Lenovo Thinkpad X301, X61s
+	 */
+	if (!(hibernation &&
+	      drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
+	      INTEL_INFO(dev_priv)->gen == 4))
+		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
 	return 0;
 }
@@ -662,7 +672,7 @@
 	if (error)
 		return error;
 
-	return i915_drm_suspend_late(dev);
+	return i915_drm_suspend_late(dev, false);
 }
 
 static int i915_drm_resume(struct drm_device *dev)
@@ -950,7 +960,17 @@
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_suspend_late(drm_dev);
+	return i915_drm_suspend_late(drm_dev, false);
+}
+
+static int i915_pm_poweroff_late(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	return i915_drm_suspend_late(drm_dev, true);
 }
 
 static int i915_pm_resume_early(struct device *dev)
@@ -1520,7 +1540,7 @@
 	.thaw_early = i915_pm_resume_early,
 	.thaw = i915_pm_resume,
 	.poweroff = i915_pm_suspend,
-	.poweroff_late = i915_pm_suspend_late,
+	.poweroff_late = i915_pm_poweroff_late,
 	.restore_early = i915_pm_resume_early,
 	.restore = i915_pm_resume,
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f2a825e..8727086 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2114,6 +2114,9 @@
  * number comparisons on buffer last_read|write_seqno. It also allows an
  * emission time to be associated with the request for tracking how far ahead
  * of the GPU the submission is.
+ *
+ * The requests are reference counted, so upon creation they should have an
+ * initial reference taken using kref_init
  */
 struct drm_i915_gem_request {
 	struct kref ref;
@@ -2137,7 +2140,16 @@
 	/** Position in the ringbuffer of the end of the whole request */
 	u32 tail;
 
-	/** Context related to this request */
+	/**
+	 * Context related to this request
+	 * Contexts are refcounted, so when this request is associated with a
+	 * context, we must increment the context's refcount, to guarantee that
+	 * it persists while any request is linked to it. Requests themselves
+	 * are also refcounted, so the request will only be freed when the last
+	 * reference to it is dismissed, and the code in
+	 * i915_gem_request_free() will then decrement the refcount on the
+	 * context.
+	 */
 	struct intel_context *ctx;
 
 	/** Batch buffer related to this request if any */
@@ -2374,6 +2386,7 @@
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
 				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
+				 (INTEL_DEVID(dev) & 0xf) == 0xb ||	\
 				 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c26d36c..5b20586 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2659,8 +2659,7 @@
 		if (submit_req->ctx != ring->default_context)
 			intel_lr_context_unpin(ring, submit_req->ctx);
 
-		i915_gem_context_unreference(submit_req->ctx);
-		kfree(submit_req);
+		i915_gem_request_unreference(submit_req);
 	}
 
 	/*
@@ -2937,9 +2936,9 @@
 	req = obj->last_read_req;
 
 	/* Do this after OLR check to make sure we make forward progress polling
-	 * on this IOCTL with a timeout <=0 (like busy ioctl)
+	 * on this IOCTL with a timeout == 0 (like busy ioctl)
 	 */
-	if (args->timeout_ns <= 0) {
+	if (args->timeout_ns == 0) {
 		ret = -ETIME;
 		goto out;
 	}
@@ -2949,7 +2948,8 @@
 	i915_gem_request_reference(req);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+	ret = __i915_wait_request(req, reset_counter, true,
+				  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 				  file->driver_priv);
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_request_unreference(req);
@@ -4793,6 +4793,9 @@
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
 
+	/* Double layer security blanket, see i915_gem_init() */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
@@ -4825,7 +4828,7 @@
 	for_each_ring(ring, dev_priv, i) {
 		ret = ring->init_hw(ring);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
 	for (i = 0; i < NUM_L3_SLICES(dev); i++)
@@ -4842,9 +4845,11 @@
 		DRM_ERROR("Context enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
 
-		return ret;
+		goto out;
 	}
 
+out:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	return ret;
 }
 
@@ -4878,6 +4883,14 @@
 		dev_priv->gt.stop_ring = intel_logical_ring_stop;
 	}
 
+	/* This is just a security blanket to placate dragons.
+	 * On some systems, we very sporadically observe that the first TLBs
+	 * used by the CS may be stale, despite us poking the TLB reset. If
+	 * we hold the forcewake during initialisation these problems
+	 * just magically go away.
+	 */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
 	ret = i915_gem_init_userptr(dev);
 	if (ret)
 		goto out_unlock;
@@ -4904,6 +4917,7 @@
 	}
 
 out_unlock:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 746f77f..dccdc8a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1145,7 +1145,7 @@
 
 	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 
-	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
 			 ppgtt->node.size >> 20,
 			 ppgtt->node.start / PAGE_SIZE);
 
@@ -1713,8 +1713,8 @@
 
 static void i915_gtt_color_adjust(struct drm_mm_node *node,
 				  unsigned long color,
-				  unsigned long *start,
-				  unsigned long *end)
+				  u64 *start,
+				  u64 *end)
 {
 	if (node->color != color)
 		*start += 4096;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a204584..9c6f93e 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -485,10 +485,8 @@
 			stolen_offset, gtt_offset, size);
 
 	/* KISS and expect everything to be page-aligned */
-	BUG_ON(stolen_offset & 4095);
-	BUG_ON(size & 4095);
-
-	if (WARN_ON(size == 0))
+	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
+	    WARN_ON(stolen_offset & 4095))
 		return NULL;
 
 	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7a24bd1..6377b22 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -335,9 +335,10 @@
 		return -EINVAL;
 	}
 
+	mutex_lock(&dev->struct_mutex);
 	if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
-		drm_gem_object_unreference_unlocked(&obj->base);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto err;
 	}
 
 	if (args->tiling_mode == I915_TILING_NONE) {
@@ -369,7 +370,6 @@
 		}
 	}
 
-	mutex_lock(&dev->struct_mutex);
 	if (args->tiling_mode != obj->tiling_mode ||
 	    args->stride != obj->stride) {
 		/* We need to rebind the object if its current allocation
@@ -424,6 +424,7 @@
 		obj->bit_17 = NULL;
 	}
 
+err:
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4145d95..ede5bbb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1892,6 +1892,9 @@
 	u32 iir, gt_iir, pm_iir;
 	irqreturn_t ret = IRQ_NONE;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	while (true) {
 		/* Find, clear, then process each source of interrupt */
 
@@ -1936,6 +1939,9 @@
 	u32 master_ctl, iir;
 	irqreturn_t ret = IRQ_NONE;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	for (;;) {
 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
 		iir = I915_READ(VLV_IIR);
@@ -2208,6 +2214,9 @@
 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
 	irqreturn_t ret = IRQ_NONE;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	/* We get interrupts on unclaimed registers, so check for this before we
 	 * do any I915_{READ,WRITE}. */
 	intel_uncore_check_errors(dev);
@@ -2279,6 +2288,9 @@
 	enum pipe pipe;
 	u32 aux_mask = GEN8_AUX_CHANNEL_A;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	if (IS_GEN9(dev))
 		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
 			GEN9_AUX_CHANNEL_D;
@@ -3771,6 +3783,9 @@
 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	iir = I915_READ16(IIR);
 	if (iir == 0)
 		return IRQ_NONE;
@@ -3951,6 +3966,9 @@
 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 	int pipe, ret = IRQ_NONE;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	iir = I915_READ(IIR);
 	do {
 		bool irq_received = (iir & ~flip_mask) != 0;
@@ -4171,6 +4189,9 @@
 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
+	if (!intel_irqs_enabled(dev_priv))
+		return IRQ_NONE;
+
 	iir = I915_READ(IIR);
 
 	for (;;) {
@@ -4520,6 +4541,7 @@
 {
 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
 	dev_priv->pm.irqs_enabled = false;
+	synchronize_irq(dev_priv->dev->irq);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3d220a6..9943c20 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2371,13 +2371,19 @@
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_gem_object *obj = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-	u32 base = plane_config->base;
+	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
+	u32 size_aligned = round_up(plane_config->base + plane_config->size,
+				    PAGE_SIZE);
+
+	size_aligned -= base_aligned;
 
 	if (plane_config->size == 0)
 		return false;
 
-	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
-							     plane_config->size);
+	obj = i915_gem_object_create_stolen_for_preallocated(dev,
+							     base_aligned,
+							     base_aligned,
+							     size_aligned);
 	if (!obj)
 		return false;
 
@@ -2725,10 +2731,19 @@
 	case DRM_FORMAT_XRGB8888:
 		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
 		break;
+	case DRM_FORMAT_ARGB8888:
+		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+		plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+		break;
 	case DRM_FORMAT_XBGR8888:
 		plane_ctl |= PLANE_CTL_ORDER_RGBX;
 		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
 		break;
+	case DRM_FORMAT_ABGR8888:
+		plane_ctl |= PLANE_CTL_ORDER_RGBX;
+		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+		plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+		break;
 	case DRM_FORMAT_XRGB2101010:
 		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
 		break;
@@ -6627,7 +6642,7 @@
 	aligned_height = intel_fb_align_height(dev, fb->height,
 					       plane_config->tiling);
 
-	plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+	plane_config->size = fb->pitches[0] * aligned_height;
 
 	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 		      pipe_name(pipe), plane, fb->width, fb->height,
@@ -7664,7 +7679,7 @@
 	aligned_height = intel_fb_align_height(dev, fb->height,
 					       plane_config->tiling);
 
-	plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
+	plane_config->size = fb->pitches[0] * aligned_height;
 
 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 		      pipe_name(pipe), fb->width, fb->height,
@@ -7755,7 +7770,7 @@
 	aligned_height = intel_fb_align_height(dev, fb->height,
 					       plane_config->tiling);
 
-	plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
+	plane_config->size = fb->pitches[0] * aligned_height;
 
 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 		      pipe_name(pipe), fb->width, fb->height,
@@ -8698,6 +8713,7 @@
 			old->release_fb->funcs->destroy(old->release_fb);
 		goto fail;
 	}
+	crtc->primary->crtc = crtc;
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -9700,7 +9716,7 @@
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	WARN_ON(!in_irq());
+	WARN_ON(!in_interrupt());
 
 	if (crtc == NULL)
 		return;
@@ -12182,9 +12198,6 @@
 		return -ENOMEM;
 	}
 
-	if (fb == crtc->cursor->fb)
-		return 0;
-
 	/* we only need to pin inside GTT if cursor is non-phy */
 	mutex_lock(&dev->struct_mutex);
 	if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
@@ -13096,6 +13109,9 @@
 
 	/* HP Chromebook 14 (Celeron 2955U) */
 	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+	/* Dell Chromebook 11 */
+	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 04e248d..54daa66 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -282,16 +282,6 @@
 	return ret;
 }
 
-static bool
-__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
-				      enum pipe pipe)
-{
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	return !intel_crtc->cpu_fifo_underrun_disabled;
-}
-
 /**
  * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
  * @dev_priv: i915 device instance
@@ -352,9 +342,15 @@
 void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
 					 enum pipe pipe)
 {
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+	/* We may be called too early in init, thanks BIOS! */
+	if (crtc == NULL)
+		return;
+
 	/* GMCH can't disable fifo underruns, filter them. */
 	if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
-	    !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
+	    to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
 		return;
 
 	if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0f358c5..e8d3da9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -503,18 +503,19 @@
 		 * If there isn't a request associated with this submission,
 		 * create one as a temporary holder.
 		 */
-		WARN(1, "execlist context submission without request");
 		request = kzalloc(sizeof(*request), GFP_KERNEL);
 		if (request == NULL)
 			return -ENOMEM;
 		request->ring = ring;
 		request->ctx = to;
+		kref_init(&request->ref);
+		request->uniq = dev_priv->request_uniq++;
+		i915_gem_context_reference(request->ctx);
 	} else {
+		i915_gem_request_reference(request);
 		WARN_ON(to != request->ctx);
 	}
 	request->tail = tail;
-	i915_gem_request_reference(request);
-	i915_gem_context_reference(request->ctx);
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -731,7 +732,6 @@
 		if (ctx_obj && (ctx != ring->default_context))
 			intel_lr_context_unpin(ring, ctx);
 		intel_runtime_pm_put(dev_priv);
-		i915_gem_context_unreference(ctx);
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
 	}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c47a3ba..4e8fb89 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1048,8 +1048,14 @@
 
 		/* We need to init first for ECOBUS access and then
 		 * determine later if we want to reinit, in case of MT access is
-		 * not working
+		 * not working. In this stage we don't know which flavour this
+		 * ivb is, so it is better to reset also the gen6 fw registers
+		 * before the ecobus check.
 		 */
+
+		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
+		__raw_posting_read(dev_priv, ECOBUS);
+
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 121d30c..87fe8ed 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -70,7 +70,9 @@
 		118800000, { 0x091c, 0x091c, 0x06dc },
 	}, {
 		216000000, { 0x06dc, 0x0b5c, 0x091c },
-	}
+	}, {
+		~0UL, { 0x0000, 0x0000, 0x0000 },
+	},
 };
 
 static const struct dw_hdmi_sym_term imx_sym_term[] = {
@@ -136,11 +138,34 @@
 	.destroy = drm_encoder_cleanup,
 };
 
+static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con,
+						  struct drm_display_mode *mode)
+{
+	if (mode->clock < 13500)
+		return MODE_CLOCK_LOW;
+	if (mode->clock > 266000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
+						   struct drm_display_mode *mode)
+{
+	if (mode->clock < 13500)
+		return MODE_CLOCK_LOW;
+	if (mode->clock > 270000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
 static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
-	.mpll_cfg = imx_mpll_cfg,
-	.cur_ctr  = imx_cur_ctr,
-	.sym_term = imx_sym_term,
-	.dev_type = IMX6Q_HDMI,
+	.mpll_cfg   = imx_mpll_cfg,
+	.cur_ctr    = imx_cur_ctr,
+	.sym_term   = imx_sym_term,
+	.dev_type   = IMX6Q_HDMI,
+	.mode_valid = imx6q_hdmi_mode_valid,
 };
 
 static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
@@ -148,6 +173,7 @@
 	.cur_ctr  = imx_cur_ctr,
 	.sym_term = imx_sym_term,
 	.dev_type = IMX6DL_HDMI,
+	.mode_valid = imx6dl_hdmi_mode_valid,
 };
 
 static const struct of_device_id dw_hdmi_imx_dt_ids[] = {
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 1b86aac..2d6dc94 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -163,22 +163,7 @@
 {
 	struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
 	struct imx_ldb *ldb = imx_ldb_ch->ldb;
-	struct drm_display_mode *mode = &encoder->crtc->hwmode;
 	u32 pixel_fmt;
-	unsigned long serial_clk;
-	unsigned long di_clk = mode->clock * 1000;
-	int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
-
-	if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
-		/* dual channel LVDS mode */
-		serial_clk = 3500UL * mode->clock;
-		imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
-		imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
-	} else {
-		serial_clk = 7000UL * mode->clock;
-		imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
-				di_clk);
-	}
 
 	switch (imx_ldb_ch->chno) {
 	case 0:
@@ -247,6 +232,9 @@
 	struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
 	struct imx_ldb *ldb = imx_ldb_ch->ldb;
 	int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+	unsigned long serial_clk;
+	unsigned long di_clk = mode->clock * 1000;
+	int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
 
 	if (mode->clock > 170000) {
 		dev_warn(ldb->dev,
@@ -257,6 +245,16 @@
 			 "%s: mode exceeds 85 MHz pixel clock\n", __func__);
 	}
 
+	if (dual) {
+		serial_clk = 3500UL * mode->clock;
+		imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
+		imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
+	} else {
+		serial_clk = 7000UL * mode->clock;
+		imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
+				  di_clk);
+	}
+
 	/* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
 	if (imx_ldb_ch == &ldb->channel[0]) {
 		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 5e83e00..900dda6 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -236,8 +236,11 @@
 	}
 
 	panel_node = of_parse_phandle(np, "fsl,panel", 0);
-	if (panel_node)
+	if (panel_node) {
 		imxpd->panel = of_drm_find_panel(panel_node);
+		if (!imxpd->panel)
+			return -EPROBE_DEFER;
+	}
 
 	imxpd->dev = dev;
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 8edd531c..7369ee7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -32,7 +32,10 @@
 void mdp4_irq_preinstall(struct msm_kms *kms)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+	mdp4_enable(mdp4_kms);
 	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+	mdp4_disable(mdp4_kms);
 }
 
 int mdp4_irq_postinstall(struct msm_kms *kms)
@@ -53,7 +56,9 @@
 void mdp4_irq_uninstall(struct msm_kms *kms)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+	mdp4_enable(mdp4_kms);
 	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+	mdp4_disable(mdp4_kms);
 }
 
 irqreturn_t mdp4_irq(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 09b4a25..c276624 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,9 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (   8253 bytes, from 2014-12-08 16:13:00)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml            (  27229 bytes, from 2015-02-10 17:00:41)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2014-06-02 18:31:15)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml      (   2357 bytes, from 2015-01-23 16:20:19)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -910,6 +902,7 @@
 		case 2: return (mdp5_cfg->lm.base[2]);
 		case 3: return (mdp5_cfg->lm.base[3]);
 		case 4: return (mdp5_cfg->lm.base[4]);
+		case 5: return (mdp5_cfg->lm.base[5]);
 		default: return INVALID_IDX(idx);
 	}
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 46fac54..2f2863c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -62,8 +62,8 @@
 
 		/* current cursor being scanned out: */
 		struct drm_gem_object *scanout_bo;
-		uint32_t width;
-		uint32_t height;
+		uint32_t width, height;
+		uint32_t x, y;
 	} cursor;
 };
 #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -103,8 +103,8 @@
 	struct drm_plane *plane;
 	uint32_t flush_mask = 0;
 
-	/* we could have already released CTL in the disable path: */
-	if (!mdp5_crtc->ctl)
+	/* this should not happen: */
+	if (WARN_ON(!mdp5_crtc->ctl))
 		return;
 
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
@@ -143,6 +143,11 @@
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		mdp5_plane_complete_flip(plane);
 	}
+
+	if (mdp5_crtc->ctl && !crtc->state->enable) {
+		mdp5_ctl_release(mdp5_crtc->ctl);
+		mdp5_crtc->ctl = NULL;
+	}
 }
 
 static void unref_cursor_worker(struct drm_flip_work *work, void *val)
@@ -386,14 +391,17 @@
 	mdp5_crtc->event = crtc->state->event;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
+	/*
+	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
+	 * it means we are trying to flush a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!mdp5_crtc->ctl))
+		return;
+
 	blend_setup(crtc);
 	crtc_flush_all(crtc);
 	request_pending(crtc, PENDING_FLIP);
-
-	if (mdp5_crtc->ctl && !crtc->state->enable) {
-		mdp5_ctl_release(mdp5_crtc->ctl);
-		mdp5_crtc->ctl = NULL;
-	}
 }
 
 static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -403,6 +411,32 @@
 	return -EINVAL;
 }
 
+static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	uint32_t xres = crtc->mode.hdisplay;
+	uint32_t yres = crtc->mode.vdisplay;
+
+	/*
+	 * Cursor Region Of Interest (ROI) is a plane read from cursor
+	 * buffer to render. The ROI region is determined by the visibility of
+	 * the cursor point. In the default Cursor image the cursor point will
+	 * be at the top left of the cursor image, unless it is specified
+	 * otherwise using hotspot feature.
+	 *
+	 * If the cursor point reaches the right (xres - x < cursor.width) or
+	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
+	 * width and ROI height need to be evaluated to crop the cursor image
+	 * accordingly.
+	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
+	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
+	 */
+	*roi_w = min(mdp5_crtc->cursor.width, xres -
+			mdp5_crtc->cursor.x);
+	*roi_h = min(mdp5_crtc->cursor.height, yres -
+			mdp5_crtc->cursor.y);
+}
+
 static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 		struct drm_file *file, uint32_t handle,
 		uint32_t width, uint32_t height)
@@ -416,6 +450,7 @@
 	unsigned int depth;
 	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
 	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+	uint32_t roi_w, roi_h;
 	unsigned long flags;
 
 	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -446,6 +481,12 @@
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
 	old_bo = mdp5_crtc->cursor.scanout_bo;
 
+	mdp5_crtc->cursor.scanout_bo = cursor_bo;
+	mdp5_crtc->cursor.width = width;
+	mdp5_crtc->cursor.height = height;
+
+	get_roi(crtc, &roi_w, &roi_h);
+
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
 			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -453,19 +494,14 @@
 			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
 			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
-			MDP5_LM_CURSOR_SIZE_ROI_H(height) |
-			MDP5_LM_CURSOR_SIZE_ROI_W(width));
+			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
+			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
 
-
 	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
-	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN;
 	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
 
-	mdp5_crtc->cursor.scanout_bo = cursor_bo;
-	mdp5_crtc->cursor.width = width;
-	mdp5_crtc->cursor.height = height;
 	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
 
 	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
@@ -489,31 +525,18 @@
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
-	uint32_t xres = crtc->mode.hdisplay;
-	uint32_t yres = crtc->mode.vdisplay;
 	uint32_t roi_w;
 	uint32_t roi_h;
 	unsigned long flags;
 
-	x = (x > 0) ? x : 0;
-	y = (y > 0) ? y : 0;
+	/* In case the CRTC is disabled, just drop the cursor update */
+	if (unlikely(!crtc->state->enable))
+		return 0;
 
-	/*
-	 * Cursor Region Of Interest (ROI) is a plane read from cursor
-	 * buffer to render. The ROI region is determined by the visiblity of
-	 * the cursor point. In the default Cursor image the cursor point will
-	 * be at the top left of the cursor image, unless it is specified
-	 * otherwise using hotspot feature.
-	 *
-	 * If the cursor point reaches the right (xres - x < cursor.width) or
-	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
-	 * width and ROI height need to be evaluated to crop the cursor image
-	 * accordingly.
-	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
-	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
-	 */
-	roi_w = min(mdp5_crtc->cursor.width, xres - x);
-	roi_h = min(mdp5_crtc->cursor.height, yres - y);
+	mdp5_crtc->cursor.x = x = max(x, 0);
+	mdp5_crtc->cursor.y = y = max(y, 0);
+
+	get_roi(crtc, &roi_w, &roi_h);
 
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
@@ -544,8 +567,8 @@
 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
 	.mode_fixup = mdp5_crtc_mode_fixup,
 	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
-	.prepare = mdp5_crtc_disable,
-	.commit = mdp5_crtc_enable,
+	.disable = mdp5_crtc_disable,
+	.enable = mdp5_crtc_enable,
 	.atomic_check = mdp5_crtc_atomic_check,
 	.atomic_begin = mdp5_crtc_atomic_begin,
 	.atomic_flush = mdp5_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index d6a14bb..af0e02f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -267,14 +267,14 @@
 	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 
-	mdp5_encoder->enabled = false;
+	mdp5_encoder->enabled = true;
 }
 
 static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
 	.mode_fixup = mdp5_encoder_mode_fixup,
 	.mode_set = mdp5_encoder_mode_set,
-	.prepare = mdp5_encoder_disable,
-	.commit = mdp5_encoder_enable,
+	.disable = mdp5_encoder_disable,
+	.enable = mdp5_encoder_enable,
 };
 
 /* initialize encoder */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 70ac81e..a940710 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -34,7 +34,10 @@
 void mdp5_irq_preinstall(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	mdp5_enable(mdp5_kms);
 	mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+	mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+	mdp5_disable(mdp5_kms);
 }
 
 int mdp5_irq_postinstall(struct msm_kms *kms)
@@ -57,7 +60,9 @@
 void mdp5_irq_uninstall(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	mdp5_enable(mdp5_kms);
 	mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+	mdp5_disable(mdp5_kms);
 }
 
 static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 871aa21..18fd643 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -219,8 +219,10 @@
 	 * mark our set of crtc's as busy:
 	 */
 	ret = start_atomic(dev->dev_private, c->crtc_mask);
-	if (ret)
+	if (ret) {
+		kfree(c);
 		return ret;
+	}
 
 	/*
 	 * This is the point of no return - everything below never fails except
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 79924e4..6751553 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -418,7 +418,7 @@
 	nouveau_fbcon_zfill(dev, fbcon);
 
 	/* To allow resizeing without swapping buffers */
-	NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
+	NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
 		nouveau_fb->base.width, nouveau_fb->base.height,
 		nvbo->bo.offset, nvbo);
 
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ed644a4..86807ee 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1405,6 +1405,9 @@
 	       (x << 16) | y);
 	viewport_w = crtc->mode.hdisplay;
 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+	if ((rdev->family >= CHIP_BONAIRE) &&
+	    (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
+		viewport_h *= 2;
 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 5bf825d..8d74de8 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -178,6 +178,13 @@
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
 	case DP_AUX_NATIVE_WRITE:
 	case DP_AUX_I2C_WRITE:
+		/* The atom implementation only supports writes with a max payload of
+		 * 12 bytes since it uses 4 bits for the total count (header + payload)
+		 * in the parameter space.  The atom interface supports 16 byte
+		 * payloads for reads. The hw itself supports up to 16 bytes of payload.
+		 */
+		if (WARN_ON_ONCE(msg->size > 12))
+			return -E2BIG;
 		/* tx_size needs to be 4 even for bare address packets since the atom
 		 * table needs the info in tx_buf[3].
 		 */
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 7c9df1e..c39c1d0 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -731,7 +731,9 @@
 		dig_connector = radeon_connector->con_priv;
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
-			if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+			if (radeon_audio != 0 &&
+			    drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+			    ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
 				return ATOM_ENCODER_MODE_DP_AUDIO;
 			return ATOM_ENCODER_MODE_DP;
 		} else if (radeon_audio != 0) {
@@ -747,7 +749,9 @@
 		}
 		break;
 	case DRM_MODE_CONNECTOR_eDP:
-		if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
+		if (radeon_audio != 0 &&
+		    drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
+		    ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
 			return ATOM_ENCODER_MODE_DP_AUDIO;
 		return ATOM_ENCODER_MODE_DP;
 	case DRM_MODE_CONNECTOR_DVIA:
@@ -1622,7 +1626,6 @@
 	struct radeon_connector *radeon_connector = NULL;
 	struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
 	bool travis_quirk = false;
-	int encoder_mode;
 
 	if (connector) {
 		radeon_connector = to_radeon_connector(connector);
@@ -1718,11 +1721,6 @@
 		}
 		break;
 	}
-
-	encoder_mode = atombios_get_encoder_mode(encoder);
-	if (radeon_audio != 0 &&
-		(encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
-		radeon_audio_dpms(encoder, mode);
 }
 
 static void
@@ -1731,10 +1729,19 @@
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int encoder_mode = atombios_get_encoder_mode(encoder);
 
 	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
 		  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
 		  radeon_encoder->active_device);
+
+	if (connector && (radeon_audio != 0) &&
+	    ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+	     (ENCODER_MODE_IS_DP(encoder_mode) &&
+	      drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+		radeon_audio_dpms(encoder, mode);
+
 	switch (radeon_encoder->encoder_id) {
 	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
@@ -2136,6 +2143,7 @@
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	int encoder_mode;
 
 	radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -2163,10 +2171,6 @@
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		/* handled in dpms */
-		encoder_mode = atombios_get_encoder_mode(encoder);
-		if (radeon_audio != 0 &&
-			(encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
-			radeon_audio_mode_set(encoder, adjusted_mode);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DDI:
 	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -2188,6 +2192,13 @@
 	}
 
 	atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+	encoder_mode = atombios_get_encoder_mode(encoder);
+	if (connector && (radeon_audio != 0) &&
+	    ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+	     (ENCODER_MODE_IS_DP(encoder_mode) &&
+	      drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+		radeon_audio_mode_set(encoder, adjusted_mode);
 }
 
 static bool
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e6a4ba2..3e670d3 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3613,6 +3613,8 @@
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
@@ -7230,6 +7232,8 @@
 	WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
 	/* grbm */
 	WREG32(GRBM_INT_CNTL, 0);
+	/* SRBM */
+	WREG32(SRBM_INT_CNTL, 0);
 	/* vline/vblank, etc. */
 	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -7551,6 +7555,9 @@
 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
 
+	/* posting read */
+	RREG32(SRBM_STATUS);
+
 	return 0;
 }
 
@@ -8046,6 +8053,10 @@
 				break;
 			}
 			break;
+		case 96:
+			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+			WREG32(SRBM_INT_ACK, 0x1);
+			break;
 		case 124: /* UVD */
 			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
 			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 03003f8..c648e19 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -482,6 +482,10 @@
 #define		SOFT_RESET_ORB				(1 << 23)
 #define		SOFT_RESET_VCE				(1 << 24)
 
+#define SRBM_READ_ERROR					0xE98
+#define SRBM_INT_CNTL					0xEA0
+#define SRBM_INT_ACK					0xEA8
+
 #define VM_L2_CNTL					0x1400
 #define		ENABLE_L2_CACHE					(1 << 0)
 #define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 192c803..3adc2af 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -26,6 +26,9 @@
 #include "radeon_audio.h"
 #include "sid.h"
 
+#define DCE8_DCCG_AUDIO_DTO1_PHASE	0x05b8
+#define DCE8_DCCG_AUDIO_DTO1_MODULE	0x05bc
+
 u32 dce6_endpoint_rreg(struct radeon_device *rdev,
 			      u32 block_offset, u32 reg)
 {
@@ -252,72 +255,67 @@
 void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
 	struct radeon_crtc *crtc, unsigned int clock)
 {
-    /* Two dtos; generally use dto0 for HDMI */
+	/* Two dtos; generally use dto0 for HDMI */
 	u32 value = 0;
 
-    if (crtc)
+	if (crtc)
 		value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
 
 	WREG32(DCCG_AUDIO_DTO_SOURCE, value);
 
-    /* Express [24MHz / target pixel clock] as an exact rational
-     * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
-     * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
-     */
-    WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
-    WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
+	/* Express [24MHz / target pixel clock] as an exact rational
+	 * number (a ratio of two integers).  DCCG_AUDIO_DTOx_PHASE
+	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+	 */
+	WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
+	WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
 }
 
 void dce6_dp_audio_set_dto(struct radeon_device *rdev,
 	struct radeon_crtc *crtc, unsigned int clock)
 {
-    /* Two dtos; generally use dto1 for DP */
+	/* Two dtos; generally use dto1 for DP */
 	u32 value = 0;
 	value |= DCCG_AUDIO_DTO_SEL;
 
-    if (crtc)
+	if (crtc)
 		value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
 
 	WREG32(DCCG_AUDIO_DTO_SOURCE, value);
 
-    /* Express [24MHz / target pixel clock] as an exact rational
-     * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
-     * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
-     */
-    WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
-    WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+	/* Express [24MHz / target pixel clock] as an exact rational
+	 * number (a ratio of two integers).  DCCG_AUDIO_DTOx_PHASE
+	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+	 */
+	if (ASIC_IS_DCE8(rdev)) {
+		WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
+		WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
+	} else {
+		WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+		WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+	}
 }
 
-void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
+void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	uint32_t offset;
 
 	if (!dig || !dig->afmt)
 		return;
 
-	offset = dig->afmt->offset;
-
 	if (enable) {
-        if (dig->afmt->enabled)
-            return;
-
-		WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
-		WREG32(EVERGREEN_DP_SEC_CNTL + offset,
-			EVERGREEN_DP_SEC_ASP_ENABLE |		/* Audio packet transmission */
-			EVERGREEN_DP_SEC_ATP_ENABLE |		/* Audio timestamp packet transmission */
-			EVERGREEN_DP_SEC_AIP_ENABLE |		/* Audio infoframe packet transmission */
-			EVERGREEN_DP_SEC_STREAM_ENABLE);	/* Master enable for secondary stream engine */
-		radeon_audio_enable(rdev, dig->afmt->pin, true);
+		WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+		       EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+		WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
+		       EVERGREEN_DP_SEC_ASP_ENABLE |		/* Audio packet transmission */
+		       EVERGREEN_DP_SEC_ATP_ENABLE |		/* Audio timestamp packet transmission */
+		       EVERGREEN_DP_SEC_AIP_ENABLE |		/* Audio infoframe packet transmission */
+		       EVERGREEN_DP_SEC_STREAM_ENABLE);	/* Master enable for secondary stream engine */
 	} else {
-		if (!dig->afmt->enabled)
-			return;
-
-		WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
-		radeon_audio_enable(rdev, dig->afmt->pin, false);
+		WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
 	}
 
 	dig->afmt->enabled = enable;
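
As the comments in dce6_hdmi_audio_set_dto()/dce6_dp_audio_set_dto() note, the PHASE register holds the numerator and the MODULE register the denominator of the fraction 24 MHz / target pixel clock. A standalone sketch of that arithmetic, with an assumed 148500 kHz (1080p) pixel clock used purely for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int phase  = 24000;   /* numerator: 24 MHz expressed in kHz */
        unsigned int module = 148500;  /* denominator: assumed pixel clock in kHz */

        /* The register pair encodes the exact rational 24 MHz / pixel clock. */
        printf("DTO = %u/%u = %.6f\n", phase, module, (double)phase / module);
        return 0;
    }
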
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 78600f5..973df06 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3253,6 +3253,8 @@
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -4324,6 +4326,7 @@
 	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
 	WREG32(DMA_CNTL, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(SRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
 	if (rdev->num_crtc >= 4) {
@@ -4590,6 +4593,9 @@
 	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
 	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
 
+	/* posting read */
+	RREG32(SRBM_STATUS);
+
 	return 0;
 }
 
@@ -5066,6 +5072,10 @@
 				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
 				break;
 			}
+		case 96:
+			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+			WREG32(SRBM_INT_ACK, 0x1);
+			break;
 		case 124: /* UVD */
 			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
 			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 1d9aebc..c18d4ec 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -272,7 +272,7 @@
 }
 
 void dce4_dp_audio_set_dto(struct radeon_device *rdev,
-	struct radeon_crtc *crtc, unsigned int clock)
+			   struct radeon_crtc *crtc, unsigned int clock)
 {
 	u32 value;
 
@@ -294,7 +294,7 @@
 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
 	 */
 	WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
-	WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10);
+	WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
 }
 
 void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
@@ -350,20 +350,9 @@
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
-		HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
-		HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
-
 	WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
 		AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
 
-	WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
-		HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-
-	WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
-		HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
-		HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-
 	WREG32(AFMT_60958_0 + offset,
 		AFMT_60958_CS_CHANNEL_NUMBER_L(1));
 
@@ -408,15 +397,19 @@
 	if (!dig || !dig->afmt)
 		return;
 
-	/* Silent, r600_hdmi_enable will raise WARN for us */
-	if (enable && dig->afmt->enabled)
-		return;
-	if (!enable && !dig->afmt->enabled)
-		return;
+	if (enable) {
+		WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
+		       HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
 
-	if (!enable && dig->afmt->pin) {
-		radeon_audio_enable(rdev, dig->afmt->pin, 0);
-		dig->afmt->pin = NULL;
+		WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+		       HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+		       HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+		WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+		       HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+		       HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+	} else {
+		WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
 	}
 
 	dig->afmt->enabled = enable;
@@ -425,33 +418,28 @@
 		  enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
 }
 
-void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
+void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	uint32_t offset;
 
 	if (!dig || !dig->afmt)
 		return;
 
-	offset = dig->afmt->offset;
-
 	if (enable) {
 		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 		struct radeon_connector_atom_dig *dig_connector;
 		uint32_t val;
 
-		if (dig->afmt->enabled)
-			return;
-
-		WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+		WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
+		       EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
 
 		if (radeon_connector->con_priv) {
 			dig_connector = radeon_connector->con_priv;
-			val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset);
+			val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
 			val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
 
 			if (dig_connector->dp_clock == 162000)
@@ -459,21 +447,16 @@
 			else
 				val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5);
 
-			WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val);
+			WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val);
 		}
 
-		WREG32(EVERGREEN_DP_SEC_CNTL + offset,
+		WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
 			EVERGREEN_DP_SEC_ASP_ENABLE |		/* Audio packet transmission */
 			EVERGREEN_DP_SEC_ATP_ENABLE |		/* Audio timestamp packet transmission */
 			EVERGREEN_DP_SEC_AIP_ENABLE |		/* Audio infoframe packet transmission */
 			EVERGREEN_DP_SEC_STREAM_ENABLE);	/* Master enable for secondary stream engine */
-		radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
 	} else {
-		if (!dig->afmt->enabled)
-			return;
-
-		WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
-		radeon_audio_enable(rdev, dig->afmt->pin, 0);
+		WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
 	}
 
 	dig->afmt->enabled = enable;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index ee83d2a..a8d1d52 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1191,6 +1191,10 @@
 #define		SOFT_RESET_REGBB			(1 << 22)
 #define		SOFT_RESET_ORB				(1 << 23)
 
+#define SRBM_READ_ERROR					0xE98
+#define SRBM_INT_CNTL					0xEA0
+#define SRBM_INT_ACK					0xEA8
+
 /* display watermarks */
 #define	DC_LB_MEMORY_SPLIT				  0x6b0c
 #define	PRIORITY_A_CNT			                  0x6b18
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 24242a7..dab00812 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -962,6 +962,8 @@
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 0x1);
+	WREG32(SRBM_INT_ACK, 0x1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -1086,12 +1088,12 @@
 
 	if ((rdev->config.cayman.max_backends_per_se == 1) &&
 	    (rdev->flags & RADEON_IS_IGP)) {
-		if ((disabled_rb_mask & 3) == 1) {
-			/* RB0 disabled, RB1 enabled */
-			tmp = 0x11111111;
-		} else {
+		if ((disabled_rb_mask & 3) == 2) {
 			/* RB1 disabled, RB0 enabled */
 			tmp = 0x00000000;
+		} else {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
 		}
 	} else {
 		tmp = gb_addr_config & NUM_PIPES_MASK;
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index ad71254..6b44580 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -82,6 +82,10 @@
 #define		SOFT_RESET_REGBB			(1 << 22)
 #define		SOFT_RESET_ORB				(1 << 23)
 
+#define SRBM_READ_ERROR					0xE98
+#define SRBM_INT_CNTL					0xEA0
+#define SRBM_INT_ACK					0xEA8
+
 #define	SRBM_STATUS2				        0x0EC4
 #define		DMA_BUSY 				(1 << 5)
 #define		DMA1_BUSY 				(1 << 6)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 279801c..04f2514 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -728,6 +728,10 @@
 		tmp |= RADEON_FP2_DETECT_MASK;
 	}
 	WREG32(RADEON_GEN_INT_CNTL, tmp);
+
+	/* read back to post the write */
+	RREG32(RADEON_GEN_INT_CNTL);
+
 	return 0;
 }
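
The new "posting read" / "read back to post the write" lines follow the standard MMIO rule: writes over PCI can be posted (buffered), and reading any register of the same device flushes them, so the interrupt-enable write is guaranteed to have reached the hardware before the function returns. A minimal sketch of the idiom using the driver's WREG32()/RREG32() accessors; the helper itself is hypothetical, not part of the patch:

    static void example_irq_post(struct radeon_device *rdev, u32 mask)
    {
        WREG32(RADEON_GEN_INT_CNTL, mask);  /* may linger in a PCI write buffer */
        RREG32(RADEON_GEN_INT_CNTL);        /* posting read: forces the write out */
    }
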
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 07a71a2..2fcad34 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3784,6 +3784,9 @@
 		WREG32(RV770_CG_THERMAL_INT, thermal_int);
 	}
 
+	/* posting read */
+	RREG32(R_000E50_SRBM_STATUS);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 843b65f..fa21544 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -188,7 +188,7 @@
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 			radeon_crtc = to_radeon_crtc(crtc);
 			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-				vrefresh = radeon_crtc->hw_mode.vrefresh;
+				vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
 				break;
 			}
 		}
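
drm_mode_vrefresh() recomputes the refresh rate from the mode timings rather than relying on the vrefresh field, presumably because that field is not guaranteed to be filled in for the crtc's hw_mode; to first order the result is pixel clock divided by pixels per frame. A standalone sketch of the calculation with assumed 1080p@60 timings:

    #include <stdio.h>

    int main(void)
    {
        unsigned int clock  = 148500;  /* assumed pixel clock in kHz */
        unsigned int htotal = 2200;    /* assumed horizontal total */
        unsigned int vtotal = 1125;    /* assumed vertical total */

        /* first-order form of what drm_mode_vrefresh() computes */
        unsigned int vrefresh = (clock * 1000) / (htotal * vtotal);

        printf("vrefresh = %u Hz\n", vrefresh);  /* prints 60 */
        return 0;
    }
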
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 62c91ed..dd6606b 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -476,17 +476,6 @@
 	if (!dig || !dig->afmt)
 		return;
 
-	/* Silent, r600_hdmi_enable will raise WARN for us */
-	if (enable && dig->afmt->enabled)
-		return;
-	if (!enable && !dig->afmt->enabled)
-		return;
-
-	if (!enable && dig->afmt->pin) {
-		radeon_audio_enable(rdev, dig->afmt->pin, 0);
-		dig->afmt->pin = NULL;
-	}
-
 	/* Older chipsets require setting HDMI and routing manually */
 	if (!ASIC_IS_DCE3(rdev)) {
 		if (enable)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index a3ceef6..b21ef69 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -101,8 +101,8 @@
 	struct drm_display_mode *mode);
 void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
 void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
-void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
-void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
+void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
+void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
 
 static const u32 pin_offsets[7] =
 {
@@ -210,7 +210,7 @@
 	.set_avi_packet = evergreen_set_avi_packet,
 	.set_audio_packet = dce4_set_audio_packet,
 	.mode_set = radeon_audio_dp_mode_set,
-	.dpms = evergreen_enable_dp_audio_packets,
+	.dpms = evergreen_dp_enable,
 };
 
 static struct radeon_audio_funcs dce6_hdmi_funcs = {
@@ -240,7 +240,7 @@
 	.set_avi_packet = evergreen_set_avi_packet,
 	.set_audio_packet = dce4_set_audio_packet,
 	.mode_set = radeon_audio_dp_mode_set,
-	.dpms = dce6_enable_dp_audio_packets,
+	.dpms = dce6_dp_enable,
 };
 
 static void radeon_audio_interface_init(struct radeon_device *rdev)
@@ -452,7 +452,7 @@
 }
 
 void radeon_audio_detect(struct drm_connector *connector,
-	enum drm_connector_status status)
+			 enum drm_connector_status status)
 {
 	struct radeon_device *rdev;
 	struct radeon_encoder *radeon_encoder;
@@ -483,14 +483,11 @@
 		else
 			radeon_encoder->audio = rdev->audio.hdmi_funcs;
 
-		radeon_audio_write_speaker_allocation(connector->encoder);
-		radeon_audio_write_sad_regs(connector->encoder);
-		if (connector->encoder->crtc)
-			radeon_audio_write_latency_fields(connector->encoder,
-				&connector->encoder->crtc->mode);
+		dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
 		radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
 	} else {
 		radeon_audio_enable(rdev, dig->afmt->pin, 0);
+		dig->afmt->pin = NULL;
 	}
 }
 
@@ -694,23 +691,22 @@
  * update the info frames with the data from the current display mode
  */
 static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
-	struct drm_display_mode *mode)
+				       struct drm_display_mode *mode)
 {
-	struct radeon_device *rdev = encoder->dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
 	if (!dig || !dig->afmt)
 		return;
 
-	/* disable audio prior to setting up hw */
-	dig->afmt->pin = radeon_audio_get_pin(encoder);
-	radeon_audio_enable(rdev, dig->afmt->pin, 0);
+	radeon_audio_set_mute(encoder, true);
 
+	radeon_audio_write_speaker_allocation(encoder);
+	radeon_audio_write_sad_regs(encoder);
+	radeon_audio_write_latency_fields(encoder, mode);
 	radeon_audio_set_dto(encoder, mode->clock);
 	radeon_audio_set_vbi_packet(encoder);
 	radeon_hdmi_set_color_depth(encoder);
-	radeon_audio_set_mute(encoder, false);
 	radeon_audio_update_acr(encoder, mode->clock);
 	radeon_audio_set_audio_packet(encoder);
 	radeon_audio_select_pin(encoder);
@@ -718,8 +714,7 @@
 	if (radeon_audio_set_avi_packet(encoder, mode) < 0)
 		return;
 
-	/* enable audio after to setting up hw */
-	radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+	radeon_audio_set_mute(encoder, false);
 }
 
 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
@@ -729,23 +724,26 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector =
+		radeon_connector->con_priv;
 
 	if (!dig || !dig->afmt)
 		return;
 
-	/* disable audio prior to setting up hw */
-	dig->afmt->pin = radeon_audio_get_pin(encoder);
-	radeon_audio_enable(rdev, dig->afmt->pin, 0);
-
-	radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+	radeon_audio_write_speaker_allocation(encoder);
+	radeon_audio_write_sad_regs(encoder);
+	radeon_audio_write_latency_fields(encoder, mode);
+	if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+		radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+	else
+		radeon_audio_set_dto(encoder, dig_connector->dp_clock);
 	radeon_audio_set_audio_packet(encoder);
 	radeon_audio_select_pin(encoder);
 
 	if (radeon_audio_set_avi_packet(encoder, mode) < 0)
 		return;
-
-	/* enable audio after to setting up hw */
-	radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
 }
 
 void radeon_audio_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c830863..4d0f96c 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -256,11 +256,13 @@
 	u32 ring = RADEON_CS_RING_GFX;
 	s32 priority = 0;
 
+	INIT_LIST_HEAD(&p->validated);
+
 	if (!cs->num_chunks) {
 		return 0;
 	}
+
 	/* get chunks */
-	INIT_LIST_HEAD(&p->validated);
 	p->idx = 0;
 	p->ib.sa_bo = NULL;
 	p->const_ib.sa_bo = NULL;
@@ -715,6 +717,7 @@
 	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
 	struct radeon_device *rdev = p->rdev;
 	uint32_t header;
+	int ret = 0, i;
 
 	if (idx >= ib_chunk->length_dw) {
 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
@@ -743,14 +746,25 @@
 		break;
 	default:
 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto dump_ib;
 	}
 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto dump_ib;
 	}
 	return 0;
+
+dump_ib:
+	for (i = 0; i < ib_chunk->length_dw; i++) {
+		if (i == idx)
+			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
+		else
+			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
+	}
+	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 6b670b0..3a29703 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -179,9 +179,12 @@
 		    (rdev->pdev->subsystem_vendor == 0x1734) &&
 		    (rdev->pdev->subsystem_device == 0x1107))
 			use_bl = false;
+/* Older PPC Macs use an on-GPU backlight controller */
+#ifndef CONFIG_PPC_PMAC
 		/* disable native backlight control on older asics */
 		else if (rdev->family < CHIP_R600)
 			use_bl = false;
+#endif
 		else
 			use_bl = true;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d13d1b5..df09ca7 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -1030,37 +1030,59 @@
 	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+struct radeon_wait_cb {
+	struct fence_cb base;
+	struct task_struct *task;
+};
+
+static void
+radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+{
+	struct radeon_wait_cb *wait =
+		container_of(cb, struct radeon_wait_cb, base);
+
+	wake_up_process(wait->task);
+}
+
 static signed long radeon_fence_default_wait(struct fence *f, bool intr,
 					     signed long t)
 {
 	struct radeon_fence *fence = to_radeon_fence(f);
 	struct radeon_device *rdev = fence->rdev;
-	bool signaled;
+	struct radeon_wait_cb cb;
 
-	fence_enable_sw_signaling(&fence->base);
+	cb.task = current;
 
-	/*
-	 * This function has to return -EDEADLK, but cannot hold
-	 * exclusive_lock during the wait because some callers
-	 * may already hold it. This means checking needs_reset without
-	 * lock, and not fiddling with any gpu internals.
-	 *
-	 * The callback installed with fence_enable_sw_signaling will
-	 * run before our wait_event_*timeout call, so we will see
-	 * both the signaled fence and the changes to needs_reset.
-	 */
+	if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+		return t;
 
-	if (intr)
-		t = wait_event_interruptible_timeout(rdev->fence_queue,
-			((signaled = radeon_test_signaled(fence)) ||
-			 rdev->needs_reset), t);
-	else
-		t = wait_event_timeout(rdev->fence_queue,
-			((signaled = radeon_test_signaled(fence)) ||
-			 rdev->needs_reset), t);
+	while (t > 0) {
+		if (intr)
+			set_current_state(TASK_INTERRUPTIBLE);
+		else
+			set_current_state(TASK_UNINTERRUPTIBLE);
 
-	if (t > 0 && !signaled)
-		return -EDEADLK;
+		/*
+		 * radeon_test_signaled must be called after
+		 * set_current_state to prevent a race with wake_up_process
+		 */
+		if (radeon_test_signaled(fence))
+			break;
+
+		if (rdev->needs_reset) {
+			t = -EDEADLK;
+			break;
+		}
+
+		t = schedule_timeout(t);
+
+		if (t > 0 && intr && signal_pending(current))
+			t = -ERESTARTSYS;
+	}
+
+	__set_current_state(TASK_RUNNING);
+	fence_remove_callback(f, &cb.base);
+
 	return t;
 }
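
The rewritten radeon_fence_default_wait() above uses the classic wakeup-safe sleep loop: the fence callback only calls wake_up_process(), and the waiter changes its task state before testing the condition so a wakeup cannot slip in between the test and the sleep (the race the in-code comment warns about). Reduced to its generic shape, with condition() standing in for radeon_test_signaled():

    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (condition())              /* test only after changing the task state */
            break;
        timeout = schedule_timeout(timeout);
        if (timeout <= 0)
            break;
    }
    __set_current_state(TASK_RUNNING);
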
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 9f758d3..33cf410 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -852,6 +852,12 @@
 			single_display = false;
 	}
 
+	/* 120 Hz modes tend to be problematic even if they are under the
+	 * vblank limit.
+	 */
+	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
+		single_display = false;
+
 	/* certain older asics have a separare 3D performance state,
 	 * so try that first if the user selected performance
 	 */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d81182a..97a9048 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -694,6 +694,10 @@
 	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
 	if (ASIC_IS_DCE2(rdev))
 		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+
+	/* posting read */
+	RREG32(R_000040_GEN_INT_CNTL);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 73107fe..a7fb273 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3162,6 +3162,8 @@
 	}
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+	WREG32(SRBM_INT_CNTL, 1);
+	WREG32(SRBM_INT_ACK, 1);
 
 	evergreen_fix_pci_max_read_req_size(rdev);
 
@@ -4699,12 +4701,6 @@
 		switch (pkt.type) {
 		case RADEON_PACKET_TYPE0:
 			dev_err(rdev->dev, "Packet0 not allowed!\n");
-			for (i = 0; i < ib->length_dw; i++) {
-				if (i == idx)
-					printk("\t0x%08x <---\n", ib->ptr[i]);
-				else
-					printk("\t0x%08x\n", ib->ptr[i]);
-			}
 			ret = -EINVAL;
 			break;
 		case RADEON_PACKET_TYPE2:
@@ -4736,8 +4732,15 @@
 			ret = -EINVAL;
 			break;
 		}
-		if (ret)
+		if (ret) {
+			for (i = 0; i < ib->length_dw; i++) {
+				if (i == idx)
+					printk("\t0x%08x <---\n", ib->ptr[i]);
+				else
+					printk("\t0x%08x\n", ib->ptr[i]);
+			}
 			break;
+		}
 	} while (idx < ib->length_dw);
 
 	return ret;
@@ -5910,6 +5913,7 @@
 	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
 	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(SRBM_INT_CNTL, 0);
 	if (rdev->num_crtc >= 2) {
 		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -6199,6 +6203,9 @@
 
 	WREG32(CG_THERMAL_INT, thermal_int);
 
+	/* posting read */
+	RREG32(SRBM_STATUS);
+
 	return 0;
 }
 
@@ -6609,6 +6616,10 @@
 				break;
 			}
 			break;
+		case 96:
+			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
+			WREG32(SRBM_INT_ACK, 0x1);
+			break;
 		case 124: /* UVD */
 			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
 			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
@@ -7119,8 +7130,7 @@
 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
 
 	if (!vclk || !dclk) {
-		/* keep the Bypass mode, put PLL to sleep */
-		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		/* keep the Bypass mode */
 		return 0;
 	}
 
@@ -7136,8 +7146,7 @@
 	/* set VCO_MODE to 1 */
 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
 
-	/* toggle UPLL_SLEEP to 1 then back to 0 */
-	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+	/* disable sleep mode */
 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
 
 	/* deassert UPLL_RESET */
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index cbd91d2..99a9835 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -358,6 +358,10 @@
 #define	CC_SYS_RB_BACKEND_DISABLE			0xe80
 #define	GC_USER_SYS_RB_BACKEND_DISABLE			0xe84
 
+#define SRBM_READ_ERROR					0xE98
+#define SRBM_INT_CNTL					0xEA0
+#define SRBM_INT_ACK					0xEA8
+
 #define	SRBM_STATUS2				        0x0EC4
 #define		DMA_BUSY 				(1 << 5)
 #define		DMA1_BUSY 				(1 << 6)
@@ -908,8 +912,8 @@
 
 #define DCCG_AUDIO_DTO0_PHASE                           0x05b0
 #define DCCG_AUDIO_DTO0_MODULE                          0x05b4
-#define DCCG_AUDIO_DTO1_PHASE                           0x05b8
-#define DCCG_AUDIO_DTO1_MODULE                          0x05bc
+#define DCCG_AUDIO_DTO1_PHASE                           0x05c0
+#define DCCG_AUDIO_DTO1_MODULE                          0x05c4
 
 #define AFMT_AUDIO_SRC_CONTROL                          0x713c
 #define		AFMT_AUDIO_SRC_SELECT(x)		(((x) & 7) << 0)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3aaa84a..1a52522 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -997,8 +997,10 @@
 	crtc->state = NULL;
 
 	state = kzalloc(sizeof(*state), GFP_KERNEL);
-	if (state)
+	if (state) {
 		crtc->state = &state->base;
+		crtc->state->crtc = crtc;
+	}
 }
 
 static struct drm_crtc_state *
@@ -1012,6 +1014,7 @@
 		return NULL;
 
 	copy->base.mode_changed = false;
+	copy->base.active_changed = false;
 	copy->base.planes_changed = false;
 	copy->base.event = NULL;
 
@@ -1227,9 +1230,6 @@
 	/* program display mode */
 	tegra_dc_set_timings(dc, mode);
 
-	if (dc->soc->supports_border_color)
-		tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
-
 	/* interlacing isn't supported yet, so disable it */
 	if (dc->soc->supports_interlacing) {
 		value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
@@ -1252,42 +1252,7 @@
 
 static void tegra_crtc_prepare(struct drm_crtc *crtc)
 {
-	struct tegra_dc *dc = to_tegra_dc(crtc);
-	unsigned int syncpt;
-	unsigned long value;
-
 	drm_crtc_vblank_off(crtc);
-
-	if (dc->pipe)
-		syncpt = SYNCPT_VBLANK1;
-	else
-		syncpt = SYNCPT_VBLANK0;
-
-	/* initialize display controller */
-	tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
-	tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
-
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
-	tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-	tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
-
-	/* initialize timer */
-	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
-		WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
-	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
-
-	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
-		WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
-	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
-
-	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
-	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
-	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
 }
 
 static void tegra_crtc_commit(struct drm_crtc *crtc)
@@ -1664,6 +1629,8 @@
 	struct tegra_drm *tegra = drm->dev_private;
 	struct drm_plane *primary = NULL;
 	struct drm_plane *cursor = NULL;
+	unsigned int syncpt;
+	u32 value;
 	int err;
 
 	if (tegra->domain) {
@@ -1730,6 +1697,40 @@
 		goto cleanup;
 	}
 
+	/* initialize display controller */
+	if (dc->pipe)
+		syncpt = SYNCPT_VBLANK1;
+	else
+		syncpt = SYNCPT_VBLANK0;
+
+	tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+	tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+	/* initialize timer */
+	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+		WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+		WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+	if (dc->soc->supports_border_color)
+		tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
 	return 0;
 
 cleanup:
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 7e06657..7eaaee74 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -851,6 +851,14 @@
 	h_back_porch = mode->htotal - mode->hsync_end;
 	h_front_porch = mode->hsync_start - mode->hdisplay;
 
+	err = clk_set_rate(hdmi->clk, pclk);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
+			err);
+	}
+
+	DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk));
+
 	/* power up sequence */
 	value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
 	value &= ~SOR_PLL_PDBG;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d395b0b..8d9b7de 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -74,7 +74,7 @@
 	pr_err("    has_type: %d\n", man->has_type);
 	pr_err("    use_type: %d\n", man->use_type);
 	pr_err("    flags: 0x%08X\n", man->flags);
-	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
+	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
 	pr_err("    size: %llu\n", man->size);
 	pr_err("    available_caching: 0x%08X\n", man->available_caching);
 	pr_err("    default_caching: 0x%08X\n", man->default_caching);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6c6b655..e13b9cb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -725,32 +725,6 @@
 		goto out_err1;
 	}
 
-	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
-			     (dev_priv->vram_size >> PAGE_SHIFT));
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
-		goto out_err2;
-	}
-
-	dev_priv->has_gmr = true;
-	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
-	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-					 VMW_PL_GMR) != 0) {
-		DRM_INFO("No GMR memory available. "
-			 "Graphics memory resources are very limited.\n");
-		dev_priv->has_gmr = false;
-	}
-
-	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-		dev_priv->has_mob = true;
-		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
-				   VMW_PL_MOB) != 0) {
-			DRM_INFO("No MOB memory available. "
-				 "3D will be disabled.\n");
-			dev_priv->has_mob = false;
-		}
-	}
-
 	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
 					       dev_priv->mmio_size);
 
@@ -813,6 +787,33 @@
 		goto out_no_fman;
 	}
 
+
+	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+			     (dev_priv->vram_size >> PAGE_SHIFT));
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+		goto out_no_vram;
+	}
+
+	dev_priv->has_gmr = true;
+	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+					 VMW_PL_GMR) != 0) {
+		DRM_INFO("No GMR memory available. "
+			 "Graphics memory resources are very limited.\n");
+		dev_priv->has_gmr = false;
+	}
+
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+		dev_priv->has_mob = true;
+		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+				   VMW_PL_MOB) != 0) {
+			DRM_INFO("No MOB memory available. "
+				 "3D will be disabled.\n");
+			dev_priv->has_mob = false;
+		}
+	}
+
 	vmw_kms_save_vga(dev_priv);
 
 	/* Start kms and overlay systems, needs fifo. */
@@ -838,6 +839,12 @@
 	vmw_kms_close(dev_priv);
 out_no_kms:
 	vmw_kms_restore_vga(dev_priv);
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	if (dev_priv->has_gmr)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_no_vram:
 	vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -853,12 +860,6 @@
 	iounmap(dev_priv->mmio_virt);
 out_err3:
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
-	if (dev_priv->has_gmr)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
-out_err2:
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 out_err1:
 	vmw_ttm_global_release(dev_priv);
@@ -887,6 +888,13 @@
 	}
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
+
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	if (dev_priv->has_gmr)
+		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
@@ -898,11 +906,6 @@
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
-	if (dev_priv->has_gmr)
-		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 	vmw_ttm_global_release(dev_priv);
 
@@ -1235,6 +1238,7 @@
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
+	pci_disable_device(pdev);
 	drm_put_dev(dev);
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 33176d0..654c8da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -890,7 +890,8 @@
 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_no_reloc;
 	}
 	bo = &vmw_bo->base;
 
@@ -914,7 +915,7 @@
 
 out_no_reloc:
 	vmw_dmabuf_unreference(&vmw_bo);
-	vmw_bo_p = NULL;
+	*vmw_bo_p = NULL;
 	return ret;
 }
 
@@ -951,7 +952,8 @@
 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_no_reloc;
 	}
 	bo = &vmw_bo->base;
 
@@ -974,7 +976,7 @@
 
 out_no_reloc:
 	vmw_dmabuf_unreference(&vmw_bo);
-	vmw_bo_p = NULL;
+	*vmw_bo_p = NULL;
 	return ret;
 }
 
@@ -2780,13 +2782,11 @@
 				  NULL, arg->command_size, arg->throttle_us,
 				  (void __user *)(unsigned long)arg->fence_rep,
 				  NULL);
-
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		return ret;
 
 	vmw_kms_cursor_post_execbuf(dev_priv);
 
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
-	return ret;
+	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8725b79..07cda8c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2033,23 +2033,17 @@
 	int i;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 
-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-	if (unlikely(ret != 0))
-		return ret;
-
 	if (!arg->num_outputs) {
 		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
 		vmw_du_update_layout(dev_priv, 1, &def_rect);
-		goto out_unlock;
+		return 0;
 	}
 
 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
 			GFP_KERNEL);
-	if (unlikely(!rects)) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
+	if (unlikely(!rects))
+		return -ENOMEM;
 
 	user_rects = (void __user *)(unsigned long)arg->rects;
 	ret = copy_from_user(rects, user_rects, rects_size);
@@ -2074,7 +2068,5 @@
 
 out_free:
 	kfree(rects);
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index b61d6be..3ddfb3d 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -459,6 +459,8 @@
 
 		clkrate = clk_get_rate(di->clk_ipu);
 		div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
+		if (div == 0)
+			div = 1;
 		rate = clkrate / div;
 
 		error = rate / (sig->mode.pixelclock / 1000);
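
The new check covers pixel clocks more than twice the IPU clock: DIV_ROUND_CLOSEST() then rounds the divider down to zero and the following clkrate / div would divide by zero. A standalone illustration of that corner case (the clock values are made up):

    #include <stdio.h>

    /* same rounding as the kernel macro for positive operands */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        unsigned long clkrate    = 133000000UL;  /* assumed IPU clock: 133 MHz */
        unsigned long pixelclock = 297000000UL;  /* assumed pixel clock > 2 * clkrate */
        unsigned long div = DIV_ROUND_CLOSEST(clkrate, pixelclock);

        if (div == 0)  /* without this, clkrate / div would trap */
            div = 1;

        printf("div = %lu, rate = %lu\n", div, clkrate / div);
        return 0;
    }
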
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index db4fb6e..56ce8c2 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1872,6 +1872,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
@@ -1926,6 +1927,7 @@
 #endif
 #if IS_ENABLED(CONFIG_HID_SAITEK)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
@@ -1957,6 +1959,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 46edb4d..9c47867 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -586,6 +586,7 @@
 #define USB_VENDOR_ID_LOGITECH		0x046d
 #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
 #define USB_DEVICE_ID_LOGITECH_T651	0xb00c
+#define USB_DEVICE_ID_LOGITECH_C077	0xc007
 #define USB_DEVICE_ID_LOGITECH_RECEIVER	0xc101
 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST  0xc110
 #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
@@ -654,6 +655,7 @@
 #define USB_DEVICE_ID_MS_LK6K		0x00f9
 #define USB_DEVICE_ID_MS_PRESENTER_8K_BT	0x0701
 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB	0x0713
+#define USB_DEVICE_ID_MS_NE7K		0x071d
 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K	0x0730
 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500	0x076c
 #define USB_DEVICE_ID_MS_SURFACE_PRO_2   0x0799
@@ -802,6 +804,7 @@
 #define USB_VENDOR_ID_SAITEK		0x06a3
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD	0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000	0x0621
+#define USB_DEVICE_ID_SAITEK_RAT7_OLD	0x0ccb
 #define USB_DEVICE_ID_SAITEK_RAT7	0x0cd7
 #define USB_DEVICE_ID_SAITEK_MMO7	0x0cd0
 
@@ -896,6 +899,7 @@
 #define USB_VENDOR_ID_TIVO		0x150a
 #define USB_DEVICE_ID_TIVO_SLIDE_BT	0x1200
 #define USB_DEVICE_ID_TIVO_SLIDE	0x1201
+#define USB_DEVICE_ID_TIVO_SLIDE_PRO	0x1203
 
 #define USB_VENDOR_ID_TOPSEED		0x0766
 #define USB_DEVICE_ID_TOPSEED_CYBERLINK	0x0204
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index fbaea6e..af935eb 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -264,6 +264,8 @@
 		.driver_data = MS_ERGONOMY },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP),
 		.driver_data = MS_ERGONOMY },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K),
+		.driver_data = MS_ERGONOMY },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K),
 		.driver_data = MS_ERGONOMY | MS_RDESC },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 5632c54..a014f21 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -177,6 +177,8 @@
 static const struct hid_device_id saitek_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000),
 		.driver_data = SAITEK_FIX_PS1000 },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD),
+		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 6a58b6c..e54ce10 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -135,8 +135,9 @@
 {
 	struct hid_sensor_hub_callbacks_list *callback;
 	struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
+	unsigned long flags;
 
-	spin_lock(&pdata->dyn_callback_lock);
+	spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
 	list_for_each_entry(callback, &pdata->dyn_callback_list, list)
 		if (callback->usage_id == usage_id &&
 			(collection_index >=
@@ -145,10 +146,11 @@
 				callback->hsdev->end_collection_index)) {
 			*priv = callback->priv;
 			*hsdev = callback->hsdev;
-			spin_unlock(&pdata->dyn_callback_lock);
+			spin_unlock_irqrestore(&pdata->dyn_callback_lock,
+					       flags);
 			return callback->usage_callback;
 		}
-	spin_unlock(&pdata->dyn_callback_lock);
+	spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
 	return NULL;
 }
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 31e9d25..1896c01 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -804,7 +804,7 @@
 #define DS4_REPORT_0x81_SIZE 7
 #define SIXAXIS_REPORT_0xF2_SIZE 18
 
-static spinlock_t sony_dev_list_lock;
+static DEFINE_SPINLOCK(sony_dev_list_lock);
 static LIST_HEAD(sony_device_list);
 static DEFINE_IDA(sony_device_id_allocator);
 
@@ -1944,6 +1944,8 @@
 		return -ENOMEM;
 	}
 
+	spin_lock_init(&sc->lock);
+
 	sc->quirks = quirks;
 	hid_set_drvdata(hdev, sc);
 	sc->hdev = hdev;
@@ -2147,8 +2149,8 @@
 {
 	dbg_hid("Sony:%s\n", __func__);
 
-	ida_destroy(&sony_device_id_allocator);
 	hid_unregister_driver(&sony_driver);
+	ida_destroy(&sony_device_id_allocator);
 }
 module_init(sony_init);
 module_exit(sony_exit);
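
Both hid-sony locking fixes above deal with spinlocks that were otherwise never explicitly initialized: a file-scope lock can be defined and initialized in one step with DEFINE_SPINLOCK(), while a lock embedded in a dynamically allocated structure must be set up at runtime with spin_lock_init() before it is first taken. The two forms in miniature (example names, not the driver's structures):

    /* static lock: definition and initialization in one step */
    static DEFINE_SPINLOCK(example_list_lock);

    struct example_dev {
        spinlock_t lock;
    };

    static void example_setup(struct example_dev *dev)
    {
        /* embedded lock: initialize before first use */
        spin_lock_init(&dev->lock);
    }
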
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index d790d8d..d986969 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -64,6 +64,7 @@
 	/* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, tivo_devices);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d43e967..36053f3 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -370,7 +370,10 @@
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
 	int ret, ret_size;
-	int size = ihid->bufsize;
+	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+
+	if (size > ihid->bufsize)
+		size = ihid->bufsize;
 
 	ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
 	if (ret != size) {
@@ -785,7 +788,7 @@
 	dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
 
 	ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
-			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+			IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 			client->name, ihid);
 	if (ret < 0) {
 		dev_warn(&client->dev,
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 9be99a67..a821277 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -78,6 +78,7 @@
 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1a65079..bbe32d6 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -551,9 +551,13 @@
 	   (features->type == CINTIQ && !(data[1] & 0x40)))
 		return 1;
 
-	if (features->quirks & WACOM_QUIRK_MULTI_INPUT)
+	if (wacom->shared) {
 		wacom->shared->stylus_in_proximity = true;
 
+		if (wacom->shared->touch_down)
+			return 1;
+	}
+
 	/* in Range while exiting */
 	if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
 		input_report_key(input, BTN_TOUCH, 0);
@@ -778,6 +782,11 @@
 			input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
 			input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
 			input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
+			if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) {
+				input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+			} else {
+				input_report_abs(input, ABS_MISC, 0);
+			}
 		} else if (features->type == CINTIQ_HYBRID) {
 			/*
 			 * Do not send hardware buttons under Android. They
@@ -1038,27 +1047,28 @@
 	struct input_dev *input = wacom->input;
 	unsigned char *data = wacom->data;
 	int i;
-	int current_num_contacts = 0;
+	int current_num_contacts = data[61];
 	int contacts_to_send = 0;
 	int num_contacts_left = 4; /* maximum contacts per packet */
 	int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET;
 	int y_offset = 2;
+	static int contact_with_no_pen_down_count = 0;
 
 	if (wacom->features.type == WACOM_27QHDT) {
 		current_num_contacts = data[63];
 		num_contacts_left = 10;
 		byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET;
 		y_offset = 0;
-	} else {
-		current_num_contacts = data[61];
 	}
 
 	/*
 	 * First packet resets the counter since only the first
 	 * packet in series will have non-zero current_num_contacts.
 	 */
-	if (current_num_contacts)
+	if (current_num_contacts) {
 		wacom->num_contacts_left = current_num_contacts;
+		contact_with_no_pen_down_count = 0;
+	}
 
 	contacts_to_send = min(num_contacts_left, wacom->num_contacts_left);
 
@@ -1091,15 +1101,16 @@
 				input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
 				input_report_abs(input, ABS_MT_ORIENTATION, w > h);
 			}
+			contact_with_no_pen_down_count++;
 		}
 	}
 	input_mt_report_pointer_emulation(input, true);
 
 	wacom->num_contacts_left -= contacts_to_send;
-	if (wacom->num_contacts_left <= 0)
+	if (wacom->num_contacts_left <= 0) {
 		wacom->num_contacts_left = 0;
-
-	wacom->shared->touch_down = (wacom->num_contacts_left > 0);
+		wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
+	}
 	return 1;
 }
 
@@ -1111,6 +1122,7 @@
 	int current_num_contacts = data[2];
 	int contacts_to_send = 0;
 	int x_offset = 0;
+	static int contact_with_no_pen_down_count = 0;
 
 	/* MTTPC does not support Height and Width */
 	if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B)
@@ -1120,8 +1132,10 @@
 	 * First packet resets the counter since only the first
 	 * packet in series will have non-zero current_num_contacts.
 	 */
-	if (current_num_contacts)
+	if (current_num_contacts) {
 		wacom->num_contacts_left = current_num_contacts;
+		contact_with_no_pen_down_count = 0;
+	}
 
 	/* There are at most 5 contacts per packet */
 	contacts_to_send = min(5, wacom->num_contacts_left);
@@ -1142,15 +1156,16 @@
 			int y = get_unaligned_le16(&data[offset + x_offset + 9]);
 			input_report_abs(input, ABS_MT_POSITION_X, x);
 			input_report_abs(input, ABS_MT_POSITION_Y, y);
+			contact_with_no_pen_down_count++;
 		}
 	}
 	input_mt_report_pointer_emulation(input, true);
 
 	wacom->num_contacts_left -= contacts_to_send;
-	if (wacom->num_contacts_left < 0)
+	if (wacom->num_contacts_left <= 0) {
 		wacom->num_contacts_left = 0;
-
-	wacom->shared->touch_down = (wacom->num_contacts_left > 0);
+		wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
+	}
 	return 1;
 }
 
@@ -1188,29 +1203,25 @@
 {
 	unsigned char *data = wacom->data;
 	struct input_dev *input = wacom->input;
-	bool prox;
+	bool prox = !wacom->shared->stylus_in_proximity;
 	int x = 0, y = 0;
 
 	if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG)
 		return 0;
 
-	if (!wacom->shared->stylus_in_proximity) {
-		if (len == WACOM_PKGLEN_TPC1FG) {
-			prox = data[0] & 0x01;
-			x = get_unaligned_le16(&data[1]);
-			y = get_unaligned_le16(&data[3]);
-		} else if (len == WACOM_PKGLEN_TPC1FG_B) {
-			prox = data[2] & 0x01;
-			x = get_unaligned_le16(&data[3]);
-			y = get_unaligned_le16(&data[5]);
-		} else {
-			prox = data[1] & 0x01;
-			x = le16_to_cpup((__le16 *)&data[2]);
-			y = le16_to_cpup((__le16 *)&data[4]);
-		}
-	} else
-		/* force touch out when pen is in prox */
-		prox = 0;
+	if (len == WACOM_PKGLEN_TPC1FG) {
+		prox = prox && (data[0] & 0x01);
+		x = get_unaligned_le16(&data[1]);
+		y = get_unaligned_le16(&data[3]);
+	} else if (len == WACOM_PKGLEN_TPC1FG_B) {
+		prox = prox && (data[2] & 0x01);
+		x = get_unaligned_le16(&data[3]);
+		y = get_unaligned_le16(&data[5]);
+	} else {
+		prox = prox && (data[1] & 0x01);
+		x = le16_to_cpup((__le16 *)&data[2]);
+		y = le16_to_cpup((__le16 *)&data[4]);
+	}
 
 	if (prox) {
 		input_report_abs(input, ABS_X, x);
@@ -1608,6 +1619,7 @@
 	struct input_dev *pad_input = wacom->pad_input;
 	unsigned char *data = wacom->data;
 	int i;
+	int contact_with_no_pen_down_count = 0;
 
 	if (data[0] != 0x02)
 	    return 0;
@@ -1635,6 +1647,7 @@
 			}
 			input_report_abs(input, ABS_MT_POSITION_X, x);
 			input_report_abs(input, ABS_MT_POSITION_Y, y);
+			contact_with_no_pen_down_count++;
 		}
 	}
 
@@ -1644,11 +1657,12 @@
 	input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0);
 	input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0);
 	input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0);
+	wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
 
 	return 1;
 }
 
-static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
+static int wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data, int last_touch_count)
 {
 	struct wacom_features *features = &wacom->features;
 	struct input_dev *input = wacom->input;
@@ -1656,7 +1670,7 @@
 	int slot = input_mt_get_slot_by_key(input, data[0]);
 
 	if (slot < 0)
-		return;
+		return 0;
 
 	touch = touch && !wacom->shared->stylus_in_proximity;
 
@@ -1688,7 +1702,9 @@
 		input_report_abs(input, ABS_MT_POSITION_Y, y);
 		input_report_abs(input, ABS_MT_TOUCH_MAJOR, width);
 		input_report_abs(input, ABS_MT_TOUCH_MINOR, height);
+		last_touch_count++;
 	}
+	return last_touch_count;
 }
 
 static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
@@ -1713,6 +1729,7 @@
 	unsigned char *data = wacom->data;
 	int count = data[1] & 0x07;
 	int i;
+	int contact_with_no_pen_down_count = 0;
 
 	if (data[0] != 0x02)
 	    return 0;
@@ -1723,12 +1740,15 @@
 		int msg_id = data[offset];
 
 		if (msg_id >= 2 && msg_id <= 17)
-			wacom_bpt3_touch_msg(wacom, data + offset);
+			contact_with_no_pen_down_count =
+			    wacom_bpt3_touch_msg(wacom, data + offset,
+						 contact_with_no_pen_down_count);
 		else if (msg_id == 128)
 			wacom_bpt3_button_msg(wacom, data + offset);
 
 	}
 	input_mt_report_pointer_emulation(input, true);
+	wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
 
 	return 1;
 }
@@ -1754,6 +1774,9 @@
 		return 0;
 	}
 
+	if (wacom->shared->touch_down)
+		return 0;
+
 	prox = (data[1] & 0x20) == 0x20;
 
 	/*
@@ -2725,9 +2748,9 @@
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x32A =
-	{ "Wacom Cintiq 27QHD", 119740, 67520, 2047,
-	  63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
-	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	{ "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x32B =
 	{ "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
 	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index bce4e9f..6c99ee7 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -147,6 +147,9 @@
 						    &ads2830_regmap_config);
 	}
 
+	if (IS_ERR(data->regmap))
+		return PTR_ERR(data->regmap);
+
 	data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
 	if (!diff_input)
 		data->cmd_byte |= ADS7828_CMD_SD_SE;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ab838d9..22da9c2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -79,7 +79,7 @@
 
 config I2C_HIX5HD2
 	tristate "Hix5hd2 high-speed I2C driver"
-	depends on ARCH_HIX5HD2
+	depends on ARCH_HIX5HD2 || COMPILE_TEST
 	help
 	  Say Y here to include support for high-speed I2C controller in the
 	  Hisilicon based hix5hd2 SoCs.
@@ -372,6 +372,16 @@
 	  This support is also available as a module.  If so, the module
 	  will be called i2c-bcm2835.
 
+config I2C_BCM_IPROC
+	tristate "Broadcom iProc I2C controller"
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	default ARCH_BCM_IPROC
+	help
+	  If you say yes to this option, support will be included for the
+	  Broadcom iProc I2C controller.
+
+	  If you don't know what to do here, say N.
+
 config I2C_BCM_KONA
 	tristate "BCM Kona I2C adapter"
 	depends on ARCH_BCM_MOBILE
@@ -465,6 +475,16 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-designware-pci.
 
+config I2C_DESIGNWARE_BAYTRAIL
+	bool "Intel Baytrail I2C semaphore support"
+	depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI=y && ACPI
+	help
+	  This driver enables managed host access to the PMIC I2C bus on select
+	  Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows
+	  the host to request uninterrupted access to the PMIC's I2C bus from
+	  the platform firmware controlling it. You should say Y if running on
+	  a BayTrail system using the AXP288.
+
 config I2C_EFM32
 	tristate "EFM32 I2C controller"
 	depends on ARCH_EFM32 || COMPILE_TEST
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 56388f6..3638feb 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -33,6 +33,7 @@
 obj-$(CONFIG_I2C_AU1550)	+= i2c-au1550.o
 obj-$(CONFIG_I2C_AXXIA)		+= i2c-axxia.o
 obj-$(CONFIG_I2C_BCM2835)	+= i2c-bcm2835.o
+obj-$(CONFIG_I2C_BCM_IPROC)	+= i2c-bcm-iproc.o
 obj-$(CONFIG_I2C_BLACKFIN_TWI)	+= i2c-bfin-twi.o
 obj-$(CONFIG_I2C_CADENCE)	+= i2c-cadence.o
 obj-$(CONFIG_I2C_CBUS_GPIO)	+= i2c-cbus-gpio.o
@@ -41,6 +42,7 @@
 obj-$(CONFIG_I2C_DESIGNWARE_CORE)	+= i2c-designware-core.o
 obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM)	+= i2c-designware-platform.o
 i2c-designware-platform-objs := i2c-designware-platdrv.o
+i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
 obj-$(CONFIG_I2C_DESIGNWARE_PCI)	+= i2c-designware-pci.o
 i2c-designware-pci-objs := i2c-designware-pcidrv.o
 obj-$(CONFIG_I2C_EFM32)		+= i2c-efm32.o
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
new file mode 100644
index 0000000..d3c8915
--- /dev/null
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define CFG_OFFSET                   0x00
+#define CFG_RESET_SHIFT              31
+#define CFG_EN_SHIFT                 30
+#define CFG_M_RETRY_CNT_SHIFT        16
+#define CFG_M_RETRY_CNT_MASK         0x0f
+
+#define TIM_CFG_OFFSET               0x04
+#define TIM_CFG_MODE_400_SHIFT       31
+
+#define M_FIFO_CTRL_OFFSET           0x0c
+#define M_FIFO_RX_FLUSH_SHIFT        31
+#define M_FIFO_TX_FLUSH_SHIFT        30
+#define M_FIFO_RX_CNT_SHIFT          16
+#define M_FIFO_RX_CNT_MASK           0x7f
+#define M_FIFO_RX_THLD_SHIFT         8
+#define M_FIFO_RX_THLD_MASK          0x3f
+
+#define M_CMD_OFFSET                 0x30
+#define M_CMD_START_BUSY_SHIFT       31
+#define M_CMD_STATUS_SHIFT           25
+#define M_CMD_STATUS_MASK            0x07
+#define M_CMD_STATUS_SUCCESS         0x0
+#define M_CMD_STATUS_LOST_ARB        0x1
+#define M_CMD_STATUS_NACK_ADDR       0x2
+#define M_CMD_STATUS_NACK_DATA       0x3
+#define M_CMD_STATUS_TIMEOUT         0x4
+#define M_CMD_PROTOCOL_SHIFT         9
+#define M_CMD_PROTOCOL_MASK          0xf
+#define M_CMD_PROTOCOL_BLK_WR        0x7
+#define M_CMD_PROTOCOL_BLK_RD        0x8
+#define M_CMD_PEC_SHIFT              8
+#define M_CMD_RD_CNT_SHIFT           0
+#define M_CMD_RD_CNT_MASK            0xff
+
+#define IE_OFFSET                    0x38
+#define IE_M_RX_FIFO_FULL_SHIFT      31
+#define IE_M_RX_THLD_SHIFT           30
+#define IE_M_START_BUSY_SHIFT        28
+
+#define IS_OFFSET                    0x3c
+#define IS_M_RX_FIFO_FULL_SHIFT      31
+#define IS_M_RX_THLD_SHIFT           30
+#define IS_M_START_BUSY_SHIFT        28
+
+#define M_TX_OFFSET                  0x40
+#define M_TX_WR_STATUS_SHIFT         31
+#define M_TX_DATA_SHIFT              0
+#define M_TX_DATA_MASK               0xff
+
+#define M_RX_OFFSET                  0x44
+#define M_RX_STATUS_SHIFT            30
+#define M_RX_STATUS_MASK             0x03
+#define M_RX_PEC_ERR_SHIFT           29
+#define M_RX_DATA_SHIFT              0
+#define M_RX_DATA_MASK               0xff
+
+#define I2C_TIMEOUT_MESC             100
+#define M_TX_RX_FIFO_SIZE            64
+
+enum bus_speed_index {
+	I2C_SPD_100K = 0,
+	I2C_SPD_400K,
+};
+
+struct bcm_iproc_i2c_dev {
+	struct device *device;
+	int irq;
+
+	void __iomem *base;
+
+	struct i2c_adapter adapter;
+
+	struct completion done;
+	int xfer_is_done;
+};
+
+/*
+ * Can be expanded in the future if more interrupt status bits are utilized
+ */
+#define ISR_MASK (1 << IS_M_START_BUSY_SHIFT)
+
+static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+{
+	struct bcm_iproc_i2c_dev *iproc_i2c = data;
+	u32 status = readl(iproc_i2c->base + IS_OFFSET);
+
+	status &= ISR_MASK;
+
+	if (!status)
+		return IRQ_NONE;
+
+	writel(status, iproc_i2c->base + IS_OFFSET);
+	iproc_i2c->xfer_is_done = 1;
+	complete_all(&iproc_i2c->done);
+
+	return IRQ_HANDLED;
+}
+
+static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
+				      struct i2c_msg *msg)
+{
+	u32 val;
+
+	val = readl(iproc_i2c->base + M_CMD_OFFSET);
+	val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK;
+
+	switch (val) {
+	case M_CMD_STATUS_SUCCESS:
+		return 0;
+
+	case M_CMD_STATUS_LOST_ARB:
+		dev_dbg(iproc_i2c->device, "lost bus arbitration\n");
+		return -EAGAIN;
+
+	case M_CMD_STATUS_NACK_ADDR:
+		dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
+		return -ENXIO;
+
+	case M_CMD_STATUS_NACK_DATA:
+		dev_dbg(iproc_i2c->device, "NAK data\n");
+		return -ENXIO;
+
+	case M_CMD_STATUS_TIMEOUT:
+		dev_dbg(iproc_i2c->device, "bus timeout\n");
+		return -ETIMEDOUT;
+
+	default:
+		dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
+		return -EIO;
+	}
+}
+
+static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
+					 struct i2c_msg *msg)
+{
+	int ret, i;
+	u8 addr;
+	u32 val;
+	unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MESC);
+
+	/* need to reserve one byte in the FIFO for the slave address */
+	if (msg->len > M_TX_RX_FIFO_SIZE - 1) {
+		dev_err(iproc_i2c->device,
+			"only support data length up to %u bytes\n",
+			M_TX_RX_FIFO_SIZE - 1);
+		return -EOPNOTSUPP;
+	}
+
+	/* check if bus is busy */
+	if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
+	       BIT(M_CMD_START_BUSY_SHIFT))) {
+		dev_warn(iproc_i2c->device, "bus is busy\n");
+		return -EBUSY;
+	}
+
+	/* format and load slave address into the TX FIFO */
+	addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0);
+	writel(addr, iproc_i2c->base + M_TX_OFFSET);
+
+	/* for a write transaction, load data into the TX FIFO */
+	if (!(msg->flags & I2C_M_RD)) {
+		for (i = 0; i < msg->len; i++) {
+			val = msg->buf[i];
+
+			/* mark the last byte */
+			if (i == msg->len - 1)
+				val |= 1 << M_TX_WR_STATUS_SHIFT;
+
+			writel(val, iproc_i2c->base + M_TX_OFFSET);
+		}
+	}
+
+	/* mark as incomplete before starting the transaction */
+	reinit_completion(&iproc_i2c->done);
+	iproc_i2c->xfer_is_done = 0;
+
+	/*
+	 * Enable the "start busy" interrupt, which will be triggered after the
+	 * transaction is done, i.e., when the internal start_busy bit
+	 * transitions from 1 to 0.
+	 */
+	writel(1 << IE_M_START_BUSY_SHIFT, iproc_i2c->base + IE_OFFSET);
+
+	/*
+	 * Now we can activate the transfer. For a read operation, specify the
+	 * number of bytes to read
+	 */
+	val = 1 << M_CMD_START_BUSY_SHIFT;
+	if (msg->flags & I2C_M_RD) {
+		val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
+		       (msg->len << M_CMD_RD_CNT_SHIFT);
+	} else {
+		val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT);
+	}
+	writel(val, iproc_i2c->base + M_CMD_OFFSET);
+
+	time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left);
+
+	/* disable all interrupts */
+	writel(0, iproc_i2c->base + IE_OFFSET);
+	/* read it back to flush the write */
+	readl(iproc_i2c->base + IE_OFFSET);
+
+	/* make sure the interrupt handler isn't running */
+	synchronize_irq(iproc_i2c->irq);
+
+	if (!time_left && !iproc_i2c->xfer_is_done) {
+		dev_err(iproc_i2c->device, "transaction timed out\n");
+
+		/* flush FIFOs */
+		val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
+		      (1 << M_FIFO_TX_FLUSH_SHIFT);
+		writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+		return -ETIMEDOUT;
+	}
+
+	ret = bcm_iproc_i2c_check_status(iproc_i2c, msg);
+	if (ret) {
+		/* flush both TX/RX FIFOs */
+		val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
+		      (1 << M_FIFO_TX_FLUSH_SHIFT);
+		writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+		return ret;
+	}
+
+	/*
+	 * For a read operation, we now need to load the data from FIFO
+	 * into the memory buffer
+	 */
+	if (msg->flags & I2C_M_RD) {
+		for (i = 0; i < msg->len; i++) {
+			msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >>
+				      M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+		}
+	}
+
+	return 0;
+}
+
+static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
+			      struct i2c_msg msgs[], int num)
+{
+	struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter);
+	int ret, i;
+
+	/* go through all messages */
+	for (i = 0; i < num; i++) {
+		ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]);
+		if (ret) {
+			dev_dbg(iproc_i2c->device, "xfer failed\n");
+			return ret;
+		}
+	}
+
+	return num;
+}
+
+static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm bcm_iproc_algo = {
+	.master_xfer = bcm_iproc_i2c_xfer,
+	.functionality = bcm_iproc_i2c_functionality,
+};
+
+static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+	unsigned int bus_speed;
+	u32 val;
+	int ret = of_property_read_u32(iproc_i2c->device->of_node,
+				       "clock-frequency", &bus_speed);
+	if (ret < 0) {
+		dev_info(iproc_i2c->device,
+			"unable to interpret clock-frequency DT property\n");
+		bus_speed = 100000;
+	}
+
+	if (bus_speed < 100000) {
+		dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
+			bus_speed);
+		dev_err(iproc_i2c->device,
+			"valid speeds are 100khz and 400khz\n");
+		return -EINVAL;
+	} else if (bus_speed < 400000) {
+		bus_speed = 100000;
+	} else {
+		bus_speed = 400000;
+	}
+
+	val = readl(iproc_i2c->base + TIM_CFG_OFFSET);
+	val &= ~(1 << TIM_CFG_MODE_400_SHIFT);
+	val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
+	writel(val, iproc_i2c->base + TIM_CFG_OFFSET);
+
+	dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed);
+
+	return 0;
+}
+
+static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+	u32 val;
+
+	/* put controller in reset */
+	val = readl(iproc_i2c->base + CFG_OFFSET);
+	val |= 1 << CFG_RESET_SHIFT;
+	val &= ~(1 << CFG_EN_SHIFT);
+	writel(val, iproc_i2c->base + CFG_OFFSET);
+
+	/* wait 100 usec per spec */
+	udelay(100);
+
+	/* bring controller out of reset */
+	val &= ~(1 << CFG_RESET_SHIFT);
+	writel(val, iproc_i2c->base + CFG_OFFSET);
+
+	/* flush TX/RX FIFOs and set RX FIFO threshold to zero */
+	val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
+	writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+
+	/* disable all interrupts */
+	writel(0, iproc_i2c->base + IE_OFFSET);
+
+	/* clear all pending interrupts */
+	writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
+
+	return 0;
+}
+
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+					 bool enable)
+{
+	u32 val;
+
+	val = readl(iproc_i2c->base + CFG_OFFSET);
+	if (enable)
+		val |= BIT(CFG_EN_SHIFT);
+	else
+		val &= ~BIT(CFG_EN_SHIFT);
+	writel(val, iproc_i2c->base + CFG_OFFSET);
+}
+
+static int bcm_iproc_i2c_probe(struct platform_device *pdev)
+{
+	int irq, ret = 0;
+	struct bcm_iproc_i2c_dev *iproc_i2c;
+	struct i2c_adapter *adap;
+	struct resource *res;
+
+	iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
+				 GFP_KERNEL);
+	if (!iproc_i2c)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, iproc_i2c);
+	iproc_i2c->device = &pdev->dev;
+	init_completion(&iproc_i2c->done);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res);
+	if (IS_ERR(iproc_i2c->base))
+		return PTR_ERR(iproc_i2c->base);
+
+	ret = bcm_iproc_i2c_init(iproc_i2c);
+	if (ret)
+		return ret;
+
+	ret = bcm_iproc_i2c_cfg_speed(iproc_i2c);
+	if (ret)
+		return ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(iproc_i2c->device, "no irq resource\n");
+		return irq;
+	}
+	iproc_i2c->irq = irq;
+
+	ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0,
+			       pdev->name, iproc_i2c);
+	if (ret < 0) {
+		dev_err(iproc_i2c->device, "unable to request irq %i\n", irq);
+		return ret;
+	}
+
+	bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+
+	adap = &iproc_i2c->adapter;
+	i2c_set_adapdata(adap, iproc_i2c);
+	strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name));
+	adap->algo = &bcm_iproc_algo;
+	adap->dev.parent = &pdev->dev;
+	adap->dev.of_node = pdev->dev.of_node;
+
+	ret = i2c_add_adapter(adap);
+	if (ret) {
+		dev_err(iproc_i2c->device, "failed to add adapter\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int bcm_iproc_i2c_remove(struct platform_device *pdev)
+{
+	struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev);
+
+	/* make sure there's no pending interrupt when we remove the adapter */
+	writel(0, iproc_i2c->base + IE_OFFSET);
+	readl(iproc_i2c->base + IE_OFFSET);
+	synchronize_irq(iproc_i2c->irq);
+
+	i2c_del_adapter(&iproc_i2c->adapter);
+	bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+
+	return 0;
+}
+
+static const struct of_device_id bcm_iproc_i2c_of_match[] = {
+	{ .compatible = "brcm,iproc-i2c" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match);
+
+static struct platform_driver bcm_iproc_i2c_driver = {
+	.driver = {
+		.name = "bcm-iproc-i2c",
+		.of_match_table = bcm_iproc_i2c_of_match,
+	},
+	.probe = bcm_iproc_i2c_probe,
+	.remove = bcm_iproc_i2c_remove,
+};
+module_platform_driver(bcm_iproc_i2c_driver);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iProc I2C Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 626f74e..7d7a14c 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -128,6 +128,7 @@
  * @suspended:		Flag holding the device's PM status
  * @send_count:		Number of bytes still expected to send
  * @recv_count:		Number of bytes still expected to receive
+ * @curr_recv_count:	Number of bytes to be received in current transfer
  * @irq:		IRQ number
  * @input_clk:		Input clock to I2C controller
  * @i2c_clk:		Maximum I2C clock speed
@@ -146,6 +147,7 @@
 	u8 suspended;
 	unsigned int send_count;
 	unsigned int recv_count;
+	unsigned int curr_recv_count;
 	int irq;
 	unsigned long input_clk;
 	unsigned int i2c_clk;
@@ -182,14 +184,15 @@
  */
 static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
 {
-	unsigned int isr_status, avail_bytes;
-	unsigned int bytes_to_recv, bytes_to_send;
+	unsigned int isr_status, avail_bytes, updatetx;
+	unsigned int bytes_to_send;
 	struct cdns_i2c *id = ptr;
 	/* Signal completion only after everything is updated */
 	int done_flag = 0;
 	irqreturn_t status = IRQ_NONE;
 
 	isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+	cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
 
 	/* Handling nack and arbitration lost interrupt */
 	if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) {
@@ -197,89 +200,112 @@
 		status = IRQ_HANDLED;
 	}
 
-	/* Handling Data interrupt */
-	if ((isr_status & CDNS_I2C_IXR_DATA) &&
-			(id->recv_count >= CDNS_I2C_DATA_INTR_DEPTH)) {
-		/* Always read data interrupt threshold bytes */
-		bytes_to_recv = CDNS_I2C_DATA_INTR_DEPTH;
-		id->recv_count -= CDNS_I2C_DATA_INTR_DEPTH;
-		avail_bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+	/*
+	 * Check if the transfer size register needs to be updated again for
+	 * a large data receive operation.
+	 */
+	updatetx = 0;
+	if (id->recv_count > id->curr_recv_count)
+		updatetx = 1;
 
-		/*
-		 * if the tranfer size register value is zero, then
-		 * check for the remaining bytes and update the
-		 * transfer size register.
-		 */
-		if (!avail_bytes) {
-			if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
-				cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
-						CDNS_I2C_XFER_SIZE_OFFSET);
-			else
-				cdns_i2c_writereg(id->recv_count,
-						CDNS_I2C_XFER_SIZE_OFFSET);
-		}
+	/* When receiving, handle data interrupt and completion interrupt */
+	if (id->p_recv_buf &&
+	    ((isr_status & CDNS_I2C_IXR_COMP) ||
+	     (isr_status & CDNS_I2C_IXR_DATA))) {
+		/* Read data if receive data valid is set */
+		while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
+		       CDNS_I2C_SR_RXDV) {
+			/*
+			 * Clear hold bit that was set for FIFO control if
+			 * RX data left is less than FIFO depth, unless
+			 * repeated start is selected.
+			 */
+			if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
+			    !id->bus_hold_flag)
+				cdns_i2c_clear_bus_hold(id);
 
-		/* Process the data received */
-		while (bytes_to_recv--)
 			*(id->p_recv_buf)++ =
 				cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+			id->recv_count--;
+			id->curr_recv_count--;
 
-		if (!id->bus_hold_flag &&
-				(id->recv_count <= CDNS_I2C_FIFO_DEPTH))
-			cdns_i2c_clear_bus_hold(id);
+			if (updatetx &&
+			    (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1))
+				break;
+		}
+
+		/*
+		 * The controller sends a NACK to the slave when the transfer
+		 * size register reaches zero, regardless of the HOLD bit.
+		 * As a workaround for large receives, keep the transfer size
+		 * register non-zero until all expected data has been read.
+		 */
+		if (updatetx &&
+		    (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)) {
+			/* wait while fifo is full */
+			while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
+			       (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
+				;
+
+			/*
+			 * Check number of bytes to be received against maximum
+			 * transfer size and update register accordingly.
+			 */
+			if (((int)(id->recv_count) - CDNS_I2C_FIFO_DEPTH) >
+			    CDNS_I2C_TRANSFER_SIZE) {
+				cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
+						  CDNS_I2C_XFER_SIZE_OFFSET);
+				id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE +
+						      CDNS_I2C_FIFO_DEPTH;
+			} else {
+				cdns_i2c_writereg(id->recv_count -
+						  CDNS_I2C_FIFO_DEPTH,
+						  CDNS_I2C_XFER_SIZE_OFFSET);
+				id->curr_recv_count = id->recv_count;
+			}
+		}
+
+		/* Clear hold (if not repeated start) and signal completion */
+		if ((isr_status & CDNS_I2C_IXR_COMP) && !id->recv_count) {
+			if (!id->bus_hold_flag)
+				cdns_i2c_clear_bus_hold(id);
+			done_flag = 1;
+		}
 
 		status = IRQ_HANDLED;
 	}
 
-	/* Handling Transfer Complete interrupt */
-	if (isr_status & CDNS_I2C_IXR_COMP) {
-		if (!id->p_recv_buf) {
-			/*
-			 * If the device is sending data If there is further
-			 * data to be sent. Calculate the available space
-			 * in FIFO and fill the FIFO with that many bytes.
-			 */
-			if (id->send_count) {
-				avail_bytes = CDNS_I2C_FIFO_DEPTH -
-				    cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
-				if (id->send_count > avail_bytes)
-					bytes_to_send = avail_bytes;
-				else
-					bytes_to_send = id->send_count;
+	/* When sending, handle transfer complete interrupt */
+	if ((isr_status & CDNS_I2C_IXR_COMP) && !id->p_recv_buf) {
+		/*
+		 * If there is more data to be sent, calculate the
+		 * space available in FIFO and fill with that many bytes.
+		 */
+		if (id->send_count) {
+			avail_bytes = CDNS_I2C_FIFO_DEPTH -
+			    cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+			if (id->send_count > avail_bytes)
+				bytes_to_send = avail_bytes;
+			else
+				bytes_to_send = id->send_count;
 
-				while (bytes_to_send--) {
-					cdns_i2c_writereg(
-						(*(id->p_send_buf)++),
-						 CDNS_I2C_DATA_OFFSET);
-					id->send_count--;
-				}
-			} else {
-				/*
-				 * Signal the completion of transaction and
-				 * clear the hold bus bit if there are no
-				 * further messages to be processed.
-				 */
-				done_flag = 1;
+			while (bytes_to_send--) {
+				cdns_i2c_writereg(
+					(*(id->p_send_buf)++),
+					 CDNS_I2C_DATA_OFFSET);
+				id->send_count--;
 			}
-			if (!id->send_count && !id->bus_hold_flag)
-				cdns_i2c_clear_bus_hold(id);
 		} else {
-			if (!id->bus_hold_flag)
-				cdns_i2c_clear_bus_hold(id);
 			/*
-			 * If the device is receiving data, then signal
-			 * the completion of transaction and read the data
-			 * present in the FIFO. Signal the completion of
-			 * transaction.
+			 * Signal the completion of transaction and
+			 * clear the hold bus bit if there are no
+			 * further messages to be processed.
 			 */
-			while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
-					CDNS_I2C_SR_RXDV) {
-				*(id->p_recv_buf)++ =
-					cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
-				id->recv_count--;
-			}
 			done_flag = 1;
 		}
+		if (!id->send_count && !id->bus_hold_flag)
+			cdns_i2c_clear_bus_hold(id);
 
 		status = IRQ_HANDLED;
 	}
@@ -289,8 +315,6 @@
 	if (id->err_status)
 		status = IRQ_HANDLED;
 
-	cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
-
 	if (done_flag)
 		complete(&id->xfer_done);
 
@@ -316,6 +340,8 @@
 	if (id->p_msg->flags & I2C_M_RECV_LEN)
 		id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;
 
+	id->curr_recv_count = id->recv_count;
+
 	/*
 	 * Check for the message size against FIFO depth and set the
 	 * 'hold bus' bit if it is greater than FIFO depth.
@@ -335,11 +361,14 @@
 	 * receive if it is less than transfer size and transfer size if
 	 * it is more. Enable the interrupts.
 	 */
-	if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
+	if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
 		cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
 				  CDNS_I2C_XFER_SIZE_OFFSET);
-	else
+		id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
+	} else {
 		cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET);
+	}
+
 	/* Clear the bus hold flag if bytes to receive is less than FIFO size */
 	if (!id->bus_hold_flag &&
 		((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
@@ -516,6 +545,20 @@
 	 * processed with a repeated start.
 	 */
 	if (num > 1) {
+		/*
+		 * This controller does not give completion interrupt after a
+		 * master receive message if HOLD bit is set (repeated start),
+		 * resulting in SW timeout. Hence, if a receive message is
+		 * followed by any other message, an error is returned
+		 * indicating that this sequence is not supported.
+		 */
+		for (count = 0; count < num - 1; count++) {
+			if (msgs[count].flags & I2C_M_RD) {
+				dev_warn(adap->dev.parent,
+					 "Can't do repeated start after a receive message\n");
+				return -EOPNOTSUPP;
+			}
+		}
 		id->bus_hold_flag = 1;
 		reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
 		reg |= CDNS_I2C_CR_HOLD;
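
To make the effect of the new repeated-start check concrete, a hedged illustration (not part of the patch; the 0x50 address and buffers are made up): a write followed by a final read still passes, because only a receive message that has another message after it is rejected with -EOPNOTSUPP.

/* Illustrative only; assumes <linux/i2c.h>. */
static void example_msg_ordering(struct i2c_adapter *adap)
{
	u8 reg = 0x10, data[2];
	struct i2c_msg ok[] = {		/* accepted: the read is the last message */
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 2, .buf = data },
	};
	struct i2c_msg rejected[] = {	/* -EOPNOTSUPP: the read is followed by a write */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 2, .buf = data },
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
	};

	i2c_transfer(adap, ok, ARRAY_SIZE(ok));
	i2c_transfer(adap, rejected, ARRAY_SIZE(rejected));
}
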
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
new file mode 100644
index 0000000..7d7ae97
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -0,0 +1,162 @@
+/*
+ * Intel BayTrail PMIC I2C bus semaphore implementation
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+
+#include <asm/iosf_mbi.h>
+
+#include "i2c-designware-core.h"
+
+#define SEMAPHORE_TIMEOUT	100
+#define PUNIT_SEMAPHORE		0x7
+#define PUNIT_SEMAPHORE_BIT	BIT(0)
+#define PUNIT_SEMAPHORE_ACQUIRE	BIT(1)
+
+static unsigned long acquired;
+
+static int get_sem(struct device *dev, u32 *sem)
+{
+	u32 data;
+	int ret;
+
+	ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE,
+				&data);
+	if (ret) {
+		dev_err(dev, "iosf failed to read punit semaphore\n");
+		return ret;
+	}
+
+	*sem = data & PUNIT_SEMAPHORE_BIT;
+
+	return 0;
+}
+
+static void reset_semaphore(struct device *dev)
+{
+	u32 data;
+
+	if (iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+				PUNIT_SEMAPHORE, &data)) {
+		dev_err(dev, "iosf failed to reset punit semaphore during read\n");
+		return;
+	}
+
+	data &= ~PUNIT_SEMAPHORE_BIT;
+	if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+				PUNIT_SEMAPHORE, data))
+		dev_err(dev, "iosf failed to reset punit semaphore during write\n");
+}
+
+static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
+{
+	u32 sem;
+	int ret;
+	unsigned long start, end;
+
+	might_sleep();
+
+	if (!dev || !dev->dev)
+		return -ENODEV;
+
+	if (!dev->release_lock)
+		return 0;
+
+	/* host driver writes to side band semaphore register */
+	ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+				PUNIT_SEMAPHORE, PUNIT_SEMAPHORE_ACQUIRE);
+	if (ret) {
+		dev_err(dev->dev, "iosf punit semaphore request failed\n");
+		return ret;
+	}
+
+	/* host driver waits for bit 0 to be set in semaphore register */
+	start = jiffies;
+	end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
+	do {
+		ret = get_sem(dev->dev, &sem);
+		if (!ret && sem) {
+			acquired = jiffies;
+			dev_dbg(dev->dev, "punit semaphore acquired after %ums\n",
+				jiffies_to_msecs(jiffies - start));
+			return 0;
+		}
+
+		usleep_range(1000, 2000);
+	} while (time_before(jiffies, end));
+
+	dev_err(dev->dev, "punit semaphore timed out, resetting\n");
+	reset_semaphore(dev->dev);
+
+	ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+				PUNIT_SEMAPHORE, &sem);
+	if (ret)
+		dev_err(dev->dev, "iosf failed to read punit semaphore\n");
+	else
+		dev_err(dev->dev, "PUNIT SEM: %d\n", sem);
+
+	WARN_ON(1);
+
+	return -ETIMEDOUT;
+}
+
+static void baytrail_i2c_release(struct dw_i2c_dev *dev)
+{
+	if (!dev || !dev->dev)
+		return;
+
+	if (!dev->acquire_lock)
+		return;
+
+	reset_semaphore(dev->dev);
+	dev_dbg(dev->dev, "punit semaphore held for %ums\n",
+		jiffies_to_msecs(jiffies - acquired));
+}
+
+int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
+{
+	acpi_status status;
+	unsigned long long shared_host = 0;
+	acpi_handle handle;
+
+	if (!dev || !dev->dev)
+		return 0;
+
+	handle = ACPI_HANDLE(dev->dev);
+	if (!handle)
+		return 0;
+
+	status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
+	if (ACPI_FAILURE(status))
+		return 0;
+
+	if (shared_host) {
+		dev_info(dev->dev, "I2C bus managed by PUNIT\n");
+		dev->acquire_lock = baytrail_i2c_acquire;
+		dev->release_lock = baytrail_i2c_release;
+		dev->pm_runtime_disabled = true;
+	}
+
+	if (!iosf_mbi_available())
+		return -EPROBE_DEFER;
+
+	return 0;
+}
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
+MODULE_LICENSE("GPL v2");
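
A brief hedged note on the timing budget of the acquisition path above, phrased as a comment since the numbers follow directly from the constants in this file:

/*
 * Back-of-the-envelope, illustrative only (not part of the patch):
 * baytrail_i2c_acquire() sleeps 1-2 ms per poll and gives up after
 * SEMAPHORE_TIMEOUT = 100 ms, so it makes roughly 50-100 attempts to see
 * PUNIT_SEMAPHORE_BIT set before resetting the semaphore and returning
 * -ETIMEDOUT.  The acquire_lock/release_lock hooks installed by
 * i2c_dw_eval_lock_support() are consumed by the designware core, as the
 * i2c-designware-core.c hunks below show.
 */
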
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 23628b7..6e25c01 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -170,10 +170,10 @@
 	u32 value;
 
 	if (dev->accessor_flags & ACCESS_16BIT)
-		value = readw(dev->base + offset) |
-			(readw(dev->base + offset + 2) << 16);
+		value = readw_relaxed(dev->base + offset) |
+			(readw_relaxed(dev->base + offset + 2) << 16);
 	else
-		value = readl(dev->base + offset);
+		value = readl_relaxed(dev->base + offset);
 
 	if (dev->accessor_flags & ACCESS_SWAP)
 		return swab32(value);
@@ -187,10 +187,10 @@
 		b = swab32(b);
 
 	if (dev->accessor_flags & ACCESS_16BIT) {
-		writew((u16)b, dev->base + offset);
-		writew((u16)(b >> 16), dev->base + offset + 2);
+		writew_relaxed((u16)b, dev->base + offset);
+		writew_relaxed((u16)(b >> 16), dev->base + offset + 2);
 	} else {
-		writel(b, dev->base + offset);
+		writel_relaxed(b, dev->base + offset);
 	}
 }
 
@@ -285,6 +285,15 @@
 	u32 hcnt, lcnt;
 	u32 reg;
 	u32 sda_falling_time, scl_falling_time;
+	int ret;
+
+	if (dev->acquire_lock) {
+		ret = dev->acquire_lock(dev);
+		if (ret) {
+			dev_err(dev->dev, "couldn't acquire bus ownership\n");
+			return ret;
+		}
+	}
 
 	input_clock_khz = dev->get_clk_rate_khz(dev);
 
@@ -298,6 +307,8 @@
 	} else if (reg != DW_IC_COMP_TYPE_VALUE) {
 		dev_err(dev->dev, "Unknown Synopsys component type: "
 			"0x%08x\n", reg);
+		if (dev->release_lock)
+			dev->release_lock(dev);
 		return -ENODEV;
 	}
 
@@ -309,40 +320,39 @@
 	sda_falling_time = dev->sda_falling_time ?: 300; /* ns */
 	scl_falling_time = dev->scl_falling_time ?: 300; /* ns */
 
-	/* Standard-mode */
-	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-				4000,	/* tHD;STA = tHIGH = 4.0 us */
-				sda_falling_time,
-				0,	/* 0: DW default, 1: Ideal */
-				0);	/* No offset */
-	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-				4700,	/* tLOW = 4.7 us */
-				scl_falling_time,
-				0);	/* No offset */
-
-	/* Allow platforms to specify the ideal HCNT and LCNT values */
+	/* Set SCL timing parameters for standard-mode */
 	if (dev->ss_hcnt && dev->ss_lcnt) {
 		hcnt = dev->ss_hcnt;
 		lcnt = dev->ss_lcnt;
+	} else {
+		hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+					4000,	/* tHD;STA = tHIGH = 4.0 us */
+					sda_falling_time,
+					0,	/* 0: DW default, 1: Ideal */
+					0);	/* No offset */
+		lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+					4700,	/* tLOW = 4.7 us */
+					scl_falling_time,
+					0);	/* No offset */
 	}
 	dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
 	dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
 	dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
 
-	/* Fast-mode */
-	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-				600,	/* tHD;STA = tHIGH = 0.6 us */
-				sda_falling_time,
-				0,	/* 0: DW default, 1: Ideal */
-				0);	/* No offset */
-	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-				1300,	/* tLOW = 1.3 us */
-				scl_falling_time,
-				0);	/* No offset */
-
+	/* Set SCL timing parameters for fast-mode */
 	if (dev->fs_hcnt && dev->fs_lcnt) {
 		hcnt = dev->fs_hcnt;
 		lcnt = dev->fs_lcnt;
+	} else {
+		hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+					600,	/* tHD;STA = tHIGH = 0.6 us */
+					sda_falling_time,
+					0,	/* 0: DW default, 1: Ideal */
+					0);	/* No offset */
+		lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+					1300,	/* tLOW = 1.3 us */
+					scl_falling_time,
+					0);	/* No offset */
 	}
 	dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
 	dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
@@ -364,6 +374,9 @@
 
 	/* configure the i2c master */
 	dw_writel(dev, dev->master_cfg , DW_IC_CON);
+
+	if (dev->release_lock)
+		dev->release_lock(dev);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(i2c_dw_init);
@@ -627,6 +640,14 @@
 	dev->abort_source = 0;
 	dev->rx_outstanding = 0;
 
+	if (dev->acquire_lock) {
+		ret = dev->acquire_lock(dev);
+		if (ret) {
+			dev_err(dev->dev, "couldn't acquire bus ownership\n");
+			goto done_nolock;
+		}
+	}
+
 	ret = i2c_dw_wait_bus_not_busy(dev);
 	if (ret < 0)
 		goto done;
@@ -672,6 +693,10 @@
 	ret = -EIO;
 
 done:
+	if (dev->release_lock)
+		dev->release_lock(dev);
+
+done_nolock:
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
 	mutex_unlock(&dev->lock);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 5a410ef..9630222 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -61,6 +61,9 @@
  * @ss_lcnt: standard speed LCNT value
  * @fs_hcnt: fast speed HCNT value
  * @fs_lcnt: fast speed LCNT value
+ * @acquire_lock: function to acquire a hardware lock on the bus
+ * @release_lock: function to release a hardware lock on the bus
+ * @pm_runtime_disabled: true if pm runtime is disabled
  *
  * HCNT and LCNT parameters can be used if the platform knows more accurate
  * values than the one computed based only on the input clock frequency.
@@ -101,6 +104,9 @@
 	u16			ss_lcnt;
 	u16			fs_hcnt;
 	u16			fs_lcnt;
+	int			(*acquire_lock)(struct dw_i2c_dev *dev);
+	void			(*release_lock)(struct dw_i2c_dev *dev);
+	bool			pm_runtime_disabled;
 };
 
 #define ACCESS_SWAP		0x00000001
@@ -119,3 +125,9 @@
 extern void i2c_dw_clear_int(struct dw_i2c_dev *dev);
 extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
 extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
+
+#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
+extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev);
+#else
+static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; }
+#endif
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index acb40f9..6643d2d 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -6,7 +6,7 @@
  * Copyright (C) 2006 Texas Instruments.
  * Copyright (C) 2007 MontaVista Software Inc.
  * Copyright (C) 2009 Provigent Ltd.
- * Copyright (C) 2011 Intel corporation.
+ * Copyright (C) 2011, 2015 Intel Corporation.
  *
  * ----------------------------------------------------------------------------
  *
@@ -40,10 +40,6 @@
 #define DRIVER_NAME "i2c-designware-pci"
 
 enum dw_pci_ctl_id_t {
-	moorestown_0,
-	moorestown_1,
-	moorestown_2,
-
 	medfield_0,
 	medfield_1,
 	medfield_2,
@@ -101,28 +97,7 @@
 	.sda_hold = 0x9,
 };
 
-static struct  dw_pci_controller  dw_pci_controllers[] = {
-	[moorestown_0] = {
-		.bus_num     = 0,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-	[moorestown_1] = {
-		.bus_num     = 1,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-	[moorestown_2] = {
-		.bus_num     = 2,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
+static struct dw_pci_controller dw_pci_controllers[] = {
 	[medfield_0] = {
 		.bus_num     = 0,
 		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
@@ -170,7 +145,6 @@
 		.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
 		.tx_fifo_depth = 32,
 		.rx_fifo_depth = 32,
-		.clk_khz = 100000,
 		.functionality = I2C_FUNC_10BIT_ADDR,
 		.scl_sda_cfg = &byt_config,
 	},
@@ -179,7 +153,6 @@
 		.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
 		.tx_fifo_depth = 32,
 		.rx_fifo_depth = 32,
-		.clk_khz = 100000,
 		.functionality = I2C_FUNC_10BIT_ADDR,
 		.scl_sda_cfg = &hsw_config,
 	},
@@ -259,7 +232,7 @@
 	dev->functionality = controller->functionality |
 				DW_DEFAULT_FUNCTIONALITY;
 
-	dev->master_cfg =  controller->bus_cfg;
+	dev->master_cfg = controller->bus_cfg;
 	if (controller->scl_sda_cfg) {
 		cfg = controller->scl_sda_cfg;
 		dev->ss_hcnt = cfg->ss_hcnt;
@@ -325,12 +298,8 @@
 MODULE_ALIAS("i2c_designware-pci");
 
 static const struct pci_device_id i2_designware_pci_ids[] = {
-	/* Moorestown */
-	{ PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
-	{ PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
-	{ PCI_VDEVICE(INTEL, 0x0804), moorestown_2 },
 	/* Medfield */
-	{ PCI_VDEVICE(INTEL, 0x0817), medfield_3,},
+	{ PCI_VDEVICE(INTEL, 0x0817), medfield_3 },
 	{ PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
 	{ PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
 	{ PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
@@ -348,7 +317,7 @@
 	{ PCI_VDEVICE(INTEL, 0x9c61), haswell },
 	{ PCI_VDEVICE(INTEL, 0x9c62), haswell },
 	/* Braswell / Cherrytrail */
-	{ PCI_VDEVICE(INTEL, 0x22C1), baytrail,},
+	{ PCI_VDEVICE(INTEL, 0x22C1), baytrail },
 	{ PCI_VDEVICE(INTEL, 0x22C2), baytrail },
 	{ PCI_VDEVICE(INTEL, 0x22C3), baytrail },
 	{ PCI_VDEVICE(INTEL, 0x22C4), baytrail },
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2b463c3..c270f5f 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -195,6 +195,10 @@
 			clk_freq = pdata->i2c_scl_freq;
 	}
 
+	r = i2c_dw_eval_lock_support(dev);
+	if (r)
+		return r;
+
 	dev->functionality =
 		I2C_FUNC_I2C |
 		I2C_FUNC_10BIT_ADDR |
@@ -257,10 +261,14 @@
 		return r;
 	}
 
-	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
-	pm_runtime_use_autosuspend(&pdev->dev);
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
+	if (dev->pm_runtime_disabled) {
+		pm_runtime_forbid(&pdev->dev);
+	} else {
+		pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+		pm_runtime_use_autosuspend(&pdev->dev);
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
 
 	return 0;
 }
@@ -310,7 +318,9 @@
 	struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
 
 	clk_prepare_enable(i_dev->clk);
-	i2c_dw_init(i_dev);
+
+	if (!i_dev->pm_runtime_disabled)
+		i2c_dw_init(i_dev);
 
 	return 0;
 }
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 7f3a9fe..d7b26fc 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -201,7 +201,7 @@
 	void __iomem		*base;
 	wait_queue_head_t	queue;
 	unsigned long		i2csr;
-	unsigned int 		disable_delay;
+	unsigned int		disable_delay;
 	int			stopped;
 	unsigned int		ifdr; /* IMX_I2C_IFDR */
 	unsigned int		cur_clk;
@@ -295,7 +295,6 @@
 	dma->chan_tx = dma_request_slave_channel(dev, "tx");
 	if (!dma->chan_tx) {
 		dev_dbg(dev, "can't request DMA tx channel\n");
-		ret = -ENODEV;
 		goto fail_al;
 	}
 
@@ -313,7 +312,6 @@
 	dma->chan_rx = dma_request_slave_channel(dev, "rx");
 	if (!dma->chan_rx) {
 		dev_dbg(dev, "can't request DMA rx channel\n");
-		ret = -ENODEV;
 		goto fail_tx;
 	}
 
@@ -481,8 +479,8 @@
 	i2c_clk_rate = clk_get_rate(i2c_imx->clk);
 	if (i2c_imx->cur_clk == i2c_clk_rate)
 		return;
-	else
-		i2c_imx->cur_clk = i2c_clk_rate;
+
+	i2c_imx->cur_clk = i2c_clk_rate;
 
 	div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate;
 	if (div < i2c_clk_div[0].div)
@@ -490,7 +488,8 @@
 	else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
 		i = i2c_imx->hwdata->ndivs - 1;
 	else
-		for (i = 0; i2c_clk_div[i].div < div; i++);
+		for (i = 0; i2c_clk_div[i].div < div; i++)
+			;
 
 	/* Store divider value */
 	i2c_imx->ifdr = i2c_clk_div[i].val;
@@ -628,9 +627,9 @@
 	result = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
-	if (result <= 0) {
+	if (result == 0) {
 		dmaengine_terminate_all(dma->chan_using);
-		return result ?: -ETIMEDOUT;
+		return -ETIMEDOUT;
 	}
 
 	/* Waiting for transfer complete. */
@@ -686,9 +685,9 @@
 	result = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
-	if (result <= 0) {
+	if (result == 0) {
 		dmaengine_terminate_all(dma->chan_using);
-		return result ?: -ETIMEDOUT;
+		return -ETIMEDOUT;
 	}
 
 	/* waiting for transfer complete. */
@@ -822,6 +821,7 @@
 	/* read data */
 	for (i = 0; i < msgs->len; i++) {
 		u8 len = 0;
+
 		result = i2c_imx_trx_complete(i2c_imx);
 		if (result)
 			return result;
@@ -917,15 +917,16 @@
 		/* write/read data */
 #ifdef CONFIG_I2C_DEBUG_BUS
 		temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
-		dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, "
-			"MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__,
+		dev_dbg(&i2c_imx->adapter.dev,
+			"<%s> CONTROL: IEN=%d, IIEN=%d, MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n",
+			__func__,
 			(temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0),
 			(temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0),
 			(temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0));
 		temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
 		dev_dbg(&i2c_imx->adapter.dev,
-			"<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, "
-			"IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__,
+			"<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n",
+			__func__,
 			(temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0),
 			(temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0),
 			(temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0),
@@ -1004,7 +1005,7 @@
 	i2c_imx->adapter.owner		= THIS_MODULE;
 	i2c_imx->adapter.algo		= &i2c_imx_algo;
 	i2c_imx->adapter.dev.parent	= &pdev->dev;
-	i2c_imx->adapter.nr 		= pdev->id;
+	i2c_imx->adapter.nr		= pdev->id;
 	i2c_imx->adapter.dev.of_node	= pdev->dev.of_node;
 	i2c_imx->base			= base;
 
@@ -1063,7 +1064,7 @@
 		i2c_imx->adapter.name);
 	dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
 
-	/* Init DMA config if support*/
+	/* Init DMA config if supported */
 	i2c_imx_dma_request(i2c_imx, phy_addr);
 
 	return 0;   /* Return OK */
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 7249b5b..abf5db7 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -12,6 +12,7 @@
  * kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -35,7 +36,9 @@
 	int pos;
 	int nmsgs;
 	int state; /* see STATE_ */
-	int clock_khz;
+	struct clk *clk;
+	int ip_clock_khz;
+	int bus_clock_khz;
 	void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value);
 	u8 (*getreg)(struct ocores_i2c *i2c, int reg);
 };
@@ -215,21 +218,34 @@
 		return -ETIMEDOUT;
 }
 
-static void ocores_init(struct ocores_i2c *i2c)
+static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
 {
 	int prescale;
+	int diff;
 	u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL);
 
 	/* make sure the device is disabled */
 	oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
 
-	prescale = (i2c->clock_khz / (5*100)) - 1;
+	prescale = (i2c->ip_clock_khz / (5 * i2c->bus_clock_khz)) - 1;
+	prescale = clamp(prescale, 0, 0xffff);
+
+	diff = i2c->ip_clock_khz / (5 * (prescale + 1)) - i2c->bus_clock_khz;
+	if (abs(diff) > i2c->bus_clock_khz / 10) {
+		dev_err(dev,
+			"Unsupported clock settings: core: %d KHz, bus: %d KHz\n",
+			i2c->ip_clock_khz, i2c->bus_clock_khz);
+		return -EINVAL;
+	}
+
 	oc_setreg(i2c, OCI2C_PRELOW, prescale & 0xff);
 	oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
 
 	/* Init the device */
 	oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
 	oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_IEN | OCI2C_CTRL_EN);
+
+	return 0;
 }
 
 
@@ -304,6 +320,8 @@
 	struct device_node *np = pdev->dev.of_node;
 	const struct of_device_id *match;
 	u32 val;
+	u32 clock_frequency;
+	bool clock_frequency_present;
 
 	if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
 		/* no 'reg-shift', check for deprecated 'regstep' */
@@ -319,12 +337,42 @@
 		}
 	}
 
-	if (of_property_read_u32(np, "clock-frequency", &val)) {
-		dev_err(&pdev->dev,
-			"Missing required parameter 'clock-frequency'\n");
-		return -ENODEV;
+	clock_frequency_present = !of_property_read_u32(np, "clock-frequency",
+							&clock_frequency);
+	i2c->bus_clock_khz = 100;
+
+	i2c->clk = devm_clk_get(&pdev->dev, NULL);
+
+	if (!IS_ERR(i2c->clk)) {
+		int ret = clk_prepare_enable(i2c->clk);
+
+		if (ret) {
+			dev_err(&pdev->dev,
+				"clk_prepare_enable failed: %d\n", ret);
+			return ret;
+		}
+		i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000;
+		if (clock_frequency_present)
+			i2c->bus_clock_khz = clock_frequency / 1000;
 	}
-	i2c->clock_khz = val / 1000;
+
+	if (i2c->ip_clock_khz == 0) {
+		if (of_property_read_u32(np, "opencores,ip-clock-frequency",
+						&val)) {
+			if (!clock_frequency_present) {
+				dev_err(&pdev->dev,
+					"Missing required parameter 'opencores,ip-clock-frequency'\n");
+				return -ENODEV;
+			}
+			i2c->ip_clock_khz = clock_frequency / 1000;
+			dev_warn(&pdev->dev,
+				 "Deprecated usage of the 'clock-frequency' property, please update to 'opencores,ip-clock-frequency'\n");
+		} else {
+			i2c->ip_clock_khz = val / 1000;
+			if (clock_frequency_present)
+				i2c->bus_clock_khz = clock_frequency / 1000;
+		}
+	}
 
 	of_property_read_u32(pdev->dev.of_node, "reg-io-width",
 				&i2c->reg_io_width);
@@ -368,7 +416,8 @@
 	if (pdata) {
 		i2c->reg_shift = pdata->reg_shift;
 		i2c->reg_io_width = pdata->reg_io_width;
-		i2c->clock_khz = pdata->clock_khz;
+		i2c->ip_clock_khz = pdata->clock_khz;
+		i2c->bus_clock_khz = 100;
 	} else {
 		ret = ocores_i2c_of_probe(pdev, i2c);
 		if (ret)
@@ -402,7 +451,9 @@
 		}
 	}
 
-	ocores_init(i2c);
+	ret = ocores_init(&pdev->dev, i2c);
+	if (ret)
+		return ret;
 
 	init_waitqueue_head(&i2c->wait);
 	ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
@@ -446,6 +497,9 @@
 	/* remove adapter & data */
 	i2c_del_adapter(&i2c->adap);
 
+	if (!IS_ERR(i2c->clk))
+		clk_disable_unprepare(i2c->clk);
+
 	return 0;
 }
 
@@ -458,6 +512,8 @@
 	/* make sure the device is disabled */
 	oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
 
+	if (!IS_ERR(i2c->clk))
+		clk_disable_unprepare(i2c->clk);
 	return 0;
 }
 
@@ -465,9 +521,20 @@
 {
 	struct ocores_i2c *i2c = dev_get_drvdata(dev);
 
-	ocores_init(i2c);
+	if (!IS_ERR(i2c->clk)) {
+		unsigned long rate;
+		int ret = clk_prepare_enable(i2c->clk);
 
-	return 0;
+		if (ret) {
+			dev_err(dev,
+				"clk_prepare_enable failed: %d\n", ret);
+			return ret;
+		}
+		rate = clk_get_rate(i2c->clk) / 1000;
+		if (rate)
+			i2c->ip_clock_khz = rate;
+	}
+	return ocores_init(dev, i2c);
 }
 
 static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
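
Since the prescaler and clock sanity check above are easiest to follow with numbers, here is a hedged worked example (the clock values are hypothetical, chosen only to make the arithmetic obvious):

/*
 * Worked example for ocores_init(), illustrative values only:
 *
 *   ip_clock_khz = 20000 (20 MHz core clock), bus_clock_khz = 100 (default)
 *   prescale = 20000 / (5 * 100) - 1 = 39
 *   achieved bus clock = 20000 / (5 * (39 + 1)) = 100 kHz, diff = 0 -> OK
 *
 *   ip_clock_khz = 16000, bus_clock_khz = 400
 *   prescale = 16000 / (5 * 400) - 1 = 7
 *   achieved bus clock = 16000 / (5 * 8) = 400 kHz, diff = 0 -> OK
 *
 * A combination whose achievable rate deviates from the requested bus
 * clock by more than 10% now fails ocores_init() with -EINVAL instead of
 * silently running at the wrong speed.
 */
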
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 44f03ee..d37d9db 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -148,13 +148,6 @@
 	return ((clock->filter & 0xf) << 12) | (clock->clock & 0x03ff);
 }
 
-static inline void pmcmsptwi_reg_to_clock(
-			u32 reg, struct pmcmsptwi_clock *clock)
-{
-	clock->filter = (reg >> 12) & 0xf;
-	clock->clock = reg & 0x03ff;
-}
-
 static inline u32 pmcmsptwi_cfg_to_reg(const struct pmcmsptwi_cfg *cfg)
 {
 	return ((cfg->arbf & 0xf) << 12) |
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 9246284..5f96b1b 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -102,6 +102,9 @@
 
 	/* Settings */
 	unsigned int scl_frequency;
+	unsigned int scl_rise_ns;
+	unsigned int scl_fall_ns;
+	unsigned int sda_fall_ns;
 
 	/* Synchronization & notification */
 	spinlock_t lock;
@@ -435,6 +438,9 @@
  *
  * @clk_rate: I2C input clock rate
  * @scl_rate: Desired SCL rate
+ * @scl_rise_ns: How many ns it takes for SCL to rise.
+ * @scl_fall_ns: How many ns it takes for SCL to fall.
+ * @sda_fall_ns: How many ns it takes for SDA to fall.
  * @div_low: Divider output for low
  * @div_high: Divider output for high
  *
@@ -443,11 +449,16 @@
  * too high, we silently use the highest possible rate.
  */
 static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
+			      unsigned long scl_rise_ns,
+			      unsigned long scl_fall_ns,
+			      unsigned long sda_fall_ns,
 			      unsigned long *div_low, unsigned long *div_high)
 {
-	unsigned long min_low_ns, min_high_ns;
-	unsigned long max_data_hold_ns;
+	unsigned long spec_min_low_ns, spec_min_high_ns;
+	unsigned long spec_setup_start, spec_max_data_hold_ns;
 	unsigned long data_hold_buffer_ns;
+
+	unsigned long min_low_ns, min_high_ns;
 	unsigned long max_low_ns, min_total_ns;
 
 	unsigned long clk_rate_khz, scl_rate_khz;
@@ -469,29 +480,50 @@
 		scl_rate = 1000;
 
 	/*
-	 * min_low_ns:  The minimum number of ns we need to hold low
-	 *		to meet i2c spec
-	 * min_high_ns: The minimum number of ns we need to hold high
-	 *		to meet i2c spec
-	 * max_low_ns:  The maximum number of ns we can hold low
-	 *		to meet i2c spec
+	 * min_low_ns:  The minimum number of ns we need to hold low to
+	 *		meet I2C specification, should include fall time.
+	 * min_high_ns: The minimum number of ns we need to hold high to
+	 *		meet I2C specification, should include rise time.
+	 * max_low_ns:  The maximum number of ns we can hold low to meet
+	 *		I2C specification.
 	 *
-	 * Note: max_low_ns should be (max data hold time * 2 - buffer)
+	 * Note: max_low_ns should be (maximum data hold time * 2 - buffer)
 	 *	 This is because the i2c host on Rockchip holds the data line
 	 *	 for half the low time.
 	 */
 	if (scl_rate <= 100000) {
-		min_low_ns = 4700;
-		min_high_ns = 4000;
-		max_data_hold_ns = 3450;
+		/* Standard-mode */
+		spec_min_low_ns = 4700;
+		spec_setup_start = 4700;
+		spec_min_high_ns = 4000;
+		spec_max_data_hold_ns = 3450;
 		data_hold_buffer_ns = 50;
 	} else {
-		min_low_ns = 1300;
-		min_high_ns = 600;
-		max_data_hold_ns = 900;
+		/* Fast-mode */
+		spec_min_low_ns = 1300;
+		spec_setup_start = 600;
+		spec_min_high_ns = 600;
+		spec_max_data_hold_ns = 900;
 		data_hold_buffer_ns = 50;
 	}
-	max_low_ns = max_data_hold_ns * 2 - data_hold_buffer_ns;
+	min_high_ns = scl_rise_ns + spec_min_high_ns;
+
+	/*
+	 * Timings for repeated start:
+	 * - controller appears to drop SDA at .875x (7/8) programmed clk high.
+	 * - controller appears to keep SCL high for 2x programmed clk high.
+	 *
+	 * We need to account for those rules in picking our "high" time so
+	 * we meet tSU;STA and tHD;STA times.
+	 */
+	min_high_ns = max(min_high_ns,
+		DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000, 875));
+	min_high_ns = max(min_high_ns,
+		DIV_ROUND_UP((scl_rise_ns + spec_setup_start +
+			      sda_fall_ns + spec_min_high_ns), 2));
+
+	min_low_ns = scl_fall_ns + spec_min_low_ns;
+	max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns;
 	min_total_ns = min_low_ns + min_high_ns;
 
 	/* Adjust to avoid overflow */
@@ -510,8 +542,8 @@
 	min_div_for_hold = (min_low_div + min_high_div);
 
 	/*
-	 * This is the maximum divider so we don't go over the max.
-	 * We don't round up here (we round down) since this is a max.
+	 * This is the maximum divider so we don't go over the maximum.
+	 * We don't round up here (we round down) since this is a maximum.
 	 */
 	max_low_div = clk_rate_khz * max_low_ns / (8 * 1000000);
 
@@ -544,7 +576,7 @@
 		ideal_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns,
 					     scl_rate_khz * 8 * min_total_ns);
 
-		/* Don't allow it to go over the max */
+		/* Don't allow it to go over the maximum */
 		if (ideal_low_div > max_low_div)
 			ideal_low_div = max_low_div;
 
@@ -588,9 +620,9 @@
 	u64 t_low_ns, t_high_ns;
 	int ret;
 
-	ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, &div_low,
-				 &div_high);
-
+	ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->scl_rise_ns,
+				 i2c->scl_fall_ns, i2c->sda_fall_ns,
+				 &div_low, &div_high);
 	WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency);
 
 	clk_enable(i2c->clk);
@@ -633,9 +665,10 @@
 	switch (event) {
 	case PRE_RATE_CHANGE:
 		if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency,
-				      &div_low, &div_high) != 0) {
+				       i2c->scl_rise_ns, i2c->scl_fall_ns,
+				       i2c->sda_fall_ns,
+				       &div_low, &div_high) != 0)
 			return NOTIFY_STOP;
-		}
 
 		/* scale up */
 		if (ndata->new_rate > ndata->old_rate)
@@ -859,6 +892,24 @@
 		i2c->scl_frequency = DEFAULT_SCL_RATE;
 	}
 
+	/*
+	 * Read rise and fall time from device tree. If not available use
+	 * the default maximum timing from the specification.
+	 */
+	if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-rising-time-ns",
+				 &i2c->scl_rise_ns)) {
+		if (i2c->scl_frequency <= 100000)
+			i2c->scl_rise_ns = 1000;
+		else
+			i2c->scl_rise_ns = 300;
+	}
+	if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-falling-time-ns",
+				 &i2c->scl_fall_ns))
+		i2c->scl_fall_ns = 300;
+	if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
+				 &i2c->scl_fall_ns))
+		i2c->sda_fall_ns = i2c->scl_fall_ns;
+
 	strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
 	i2c->adap.owner = THIS_MODULE;
 	i2c->adap.algo = &rk3x_i2c_algorithm;
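
To make the new timing terms concrete, a hedged worked example using the standard-mode defaults this patch installs when the DT properties are absent (scl_rise_ns = 1000, scl_fall_ns = sda_fall_ns = 300):

/*
 * Worked example for rk3x_i2c_calc_divs(), defaults only, illustrative:
 *
 *   min_high_ns = max(scl_rise_ns + spec_min_high_ns                   =  5000,
 *                     DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000,
 *                                  875)                                =  6515,
 *                     DIV_ROUND_UP(scl_rise_ns + spec_setup_start +
 *                                  sda_fall_ns + spec_min_high_ns, 2)  =  5000)
 *               = 6515 ns
 *   min_low_ns  = scl_fall_ns + spec_min_low_ns = 300 + 4700 = 5000 ns
 *
 * In other words, with the default 1000 ns rise time it is the
 * repeated-start setup rule (tSU;STA through the 7/8 clock-high point)
 * that bounds the high period, not the plain tHIGH minimum.
 */
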
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 28b87e6..29f1433 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -286,6 +286,7 @@
 	if (rx_fifo_avail > 0 && buf_remaining > 0) {
 		BUG_ON(buf_remaining > 3);
 		val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+		val = cpu_to_le32(val);
 		memcpy(buf, &val, buf_remaining);
 		buf_remaining = 0;
 		rx_fifo_avail--;
@@ -344,6 +345,7 @@
 	if (tx_fifo_avail > 0 && buf_remaining > 0) {
 		BUG_ON(buf_remaining > 3);
 		memcpy(&val, buf, buf_remaining);
+		val = le32_to_cpu(val);
 
 		/* Again update before writing to FIFO to make sure isr sees. */
 		i2c_dev->msg_buf_remaining = 0;
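
A hedged byte-level illustration (values made up) of why the endianness conversions above matter for the partial-word memcpy():

/*
 * Illustration only.  The RX FIFO delivers bytes little-endian within
 * each 32-bit word, so a word holding the byte sequence 0x33 0x22 0x11
 * 0x00 reads back as val = 0x00112233 on any CPU.
 *
 *   memcpy(buf, &val, 2) on little-endian:           copies 0x33 0x22  (right)
 *   memcpy(buf, &val, 2) on big-endian, unconverted: copies 0x00 0x11  (wrong)
 *   with val = cpu_to_le32(val) first:               copies 0x33 0x22  (right)
 *
 * The TX path is the mirror image, hence the le32_to_cpu() before the
 * partially filled word is written to the FIFO.
 */
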
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e9eae57..edf274c 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -102,7 +102,7 @@
 		struct acpi_resource_i2c_serialbus *sb;
 
 		sb = &ares->data.i2c_serial_bus;
-		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+		if (!info->addr && sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
 			info->addr = sb->slave_address;
 			if (sb->access_mode == ACPI_I2C_10BIT_MODE)
 				info->flags |= I2C_CLIENT_TEN;
@@ -679,9 +679,6 @@
 		status = driver->remove(client);
 	}
 
-	if (dev->of_node)
-		irq_dispose_mapping(client->irq);
-
 	dev_pm_domain_detach(&client->dev, true);
 	return status;
 }
@@ -698,101 +695,6 @@
 		driver->shutdown(client);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
-	struct i2c_client *client = i2c_verify_client(dev);
-	struct i2c_driver *driver;
-
-	if (!client || !dev->driver)
-		return 0;
-	driver = to_i2c_driver(dev->driver);
-	if (!driver->suspend)
-		return 0;
-	return driver->suspend(client, mesg);
-}
-
-static int i2c_legacy_resume(struct device *dev)
-{
-	struct i2c_client *client = i2c_verify_client(dev);
-	struct i2c_driver *driver;
-
-	if (!client || !dev->driver)
-		return 0;
-	driver = to_i2c_driver(dev->driver);
-	if (!driver->resume)
-		return 0;
-	return driver->resume(client);
-}
-
-static int i2c_device_pm_suspend(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_suspend(dev);
-	else
-		return i2c_legacy_suspend(dev, PMSG_SUSPEND);
-}
-
-static int i2c_device_pm_resume(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_resume(dev);
-	else
-		return i2c_legacy_resume(dev);
-}
-
-static int i2c_device_pm_freeze(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_freeze(dev);
-	else
-		return i2c_legacy_suspend(dev, PMSG_FREEZE);
-}
-
-static int i2c_device_pm_thaw(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_thaw(dev);
-	else
-		return i2c_legacy_resume(dev);
-}
-
-static int i2c_device_pm_poweroff(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_poweroff(dev);
-	else
-		return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
-}
-
-static int i2c_device_pm_restore(struct device *dev)
-{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	if (pm)
-		return pm_generic_restore(dev);
-	else
-		return i2c_legacy_resume(dev);
-}
-#else /* !CONFIG_PM_SLEEP */
-#define i2c_device_pm_suspend	NULL
-#define i2c_device_pm_resume	NULL
-#define i2c_device_pm_freeze	NULL
-#define i2c_device_pm_thaw	NULL
-#define i2c_device_pm_poweroff	NULL
-#define i2c_device_pm_restore	NULL
-#endif /* !CONFIG_PM_SLEEP */
-
 static void i2c_client_dev_release(struct device *dev)
 {
 	kfree(to_i2c_client(dev));
@@ -804,6 +706,7 @@
 	return sprintf(buf, "%s\n", dev->type == &i2c_client_type ?
 		       to_i2c_client(dev)->name : to_i2c_adapter(dev)->name);
 }
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
 static ssize_t
 show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
@@ -817,8 +720,6 @@
 
 	return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
 }
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
 
 static struct attribute *i2c_dev_attrs[] = {
@@ -827,29 +728,7 @@
 	&dev_attr_modalias.attr,
 	NULL
 };
-
-static struct attribute_group i2c_dev_attr_group = {
-	.attrs		= i2c_dev_attrs,
-};
-
-static const struct attribute_group *i2c_dev_attr_groups[] = {
-	&i2c_dev_attr_group,
-	NULL
-};
-
-static const struct dev_pm_ops i2c_device_pm_ops = {
-	.suspend = i2c_device_pm_suspend,
-	.resume = i2c_device_pm_resume,
-	.freeze = i2c_device_pm_freeze,
-	.thaw = i2c_device_pm_thaw,
-	.poweroff = i2c_device_pm_poweroff,
-	.restore = i2c_device_pm_restore,
-	SET_RUNTIME_PM_OPS(
-		pm_generic_runtime_suspend,
-		pm_generic_runtime_resume,
-		NULL
-	)
-};
+ATTRIBUTE_GROUPS(i2c_dev);
 
 struct bus_type i2c_bus_type = {
 	.name		= "i2c",
@@ -857,12 +736,11 @@
 	.probe		= i2c_device_probe,
 	.remove		= i2c_device_remove,
 	.shutdown	= i2c_device_shutdown,
-	.pm		= &i2c_device_pm_ops,
 };
 EXPORT_SYMBOL_GPL(i2c_bus_type);
 
 static struct device_type i2c_client_type = {
-	.groups		= i2c_dev_attr_groups,
+	.groups		= i2c_dev_groups,
 	.uevent		= i2c_device_uevent,
 	.release	= i2c_client_dev_release,
 };
@@ -1261,6 +1139,7 @@
 
 	return count;
 }
+static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
 
 /*
  * And of course let the users delete the devices they instantiated, if
@@ -1315,8 +1194,6 @@
 			"delete_device");
 	return res;
 }
-
-static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
 static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
 				   i2c_sysfs_delete_device);
 
@@ -1326,18 +1203,10 @@
 	&dev_attr_delete_device.attr,
 	NULL
 };
-
-static struct attribute_group i2c_adapter_attr_group = {
-	.attrs		= i2c_adapter_attrs,
-};
-
-static const struct attribute_group *i2c_adapter_attr_groups[] = {
-	&i2c_adapter_attr_group,
-	NULL
-};
+ATTRIBUTE_GROUPS(i2c_adapter);
 
 struct device_type i2c_adapter_type = {
-	.groups		= i2c_adapter_attr_groups,
+	.groups		= i2c_adapter_groups,
 	.release	= i2c_adapter_dev_release,
 };
 EXPORT_SYMBOL_GPL(i2c_adapter_type);
@@ -1419,8 +1288,6 @@
 	if (of_get_property(node, "wakeup-source", NULL))
 		info.flags |= I2C_CLIENT_WAKE;
 
-	request_module("%s%s", I2C_MODULE_PREFIX, info.type);
-
 	result = i2c_new_device(adap, &info);
 	if (result == NULL) {
 		dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
@@ -1796,11 +1663,15 @@
 	/* device name is gone after device_unregister */
 	dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
 
-	/* clean up the sysfs representation */
+	/* wait until all references to the device are gone
+	 *
+	 * FIXME: This is old code and should ideally be replaced by an
+	 * alternative which results in decoupling the lifetime of the struct
+	 * device from the i2c_adapter, like spi or netdev do. Any solution
+	 * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled!
+	 */
 	init_completion(&adap->dev_released);
 	device_unregister(&adap->dev);
-
-	/* wait for sysfs to drop all references */
 	wait_for_completion(&adap->dev_released);
 
 	/* free bus id */
@@ -1859,14 +1730,6 @@
 	if (res)
 		return res;
 
-	/* Drivers should switch to dev_pm_ops instead. */
-	if (driver->suspend)
-		pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
-			driver->driver.name);
-	if (driver->resume)
-		pr_warn("i2c-core: driver [%s] using legacy resume method\n",
-			driver->driver.name);
-
 	pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
 
 	INIT_LIST_HEAD(&driver->clients);
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index ec11b40..3d8f4fe 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -41,6 +41,7 @@
 #include <linux/i2c-mux.h>
 #include <linux/i2c/pca954x.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 
@@ -186,6 +187,8 @@
 {
 	struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
 	struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
+	struct device_node *of_node = client->dev.of_node;
+	bool idle_disconnect_dt;
 	struct gpio_desc *gpio;
 	int num, force, class;
 	struct pca954x *data;
@@ -217,8 +220,13 @@
 	data->type = id->driver_data;
 	data->last_chan = 0;		   /* force the first selection */
 
+	idle_disconnect_dt = of_node &&
+		of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
+
 	/* Now create an adapter for each channel */
 	for (num = 0; num < chips[data->type].nchans; num++) {
+		bool idle_disconnect_pd = false;
+
 		force = 0;			  /* dynamic adap number */
 		class = 0;			  /* no class by default */
 		if (pdata) {
@@ -229,12 +237,13 @@
 			} else
 				/* discard unconfigured channels */
 				break;
+			idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
 		}
 
 		data->virt_adaps[num] =
 			i2c_add_mux_adapter(adap, &client->dev, client,
 				force, num, class, pca954x_select_chan,
-				(pdata && pdata->modes[num].deselect_on_exit)
+				(idle_disconnect_pd || idle_disconnect_dt)
 					? pca954x_deselect_mux : NULL);
 
 		if (data->virt_adaps[num] == NULL) {
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 1793aea..6eb738c 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1793,11 +1793,11 @@
 	tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN,
 					 IDETAPE_DSC_RW_MAX);
 	printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
-		"%lums tDSC%s\n",
+		"%ums tDSC%s\n",
 		drive->name, tape->name, *(u16 *)&tape->caps[14],
 		(*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
 		tape->buffer_size / 1024,
-		tape->best_dsc_rw_freq * 1000 / HZ,
+		jiffies_to_msecs(tape->best_dsc_rw_freq),
 		(drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : "");
 
 	ide_proc_register_driver(drive, tape->driver);
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 5167225..b96c636 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -58,20 +58,11 @@
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
 	}
 
-/* LSB is in nV to eliminate floating point */
-static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
-
-/*
- *  scales calculated as:
- *  rates_to_lsb[sample_rate] / (1 << pga);
- *  pga is 1 for 0, 2
- */
-
 static const int mcp3422_scales[4][4] = {
-	{ 1000000, 250000, 62500, 15625 },
-	{ 500000 , 125000, 31250, 7812 },
-	{ 250000 , 62500 , 15625, 3906 },
-	{ 125000 , 31250 , 7812 , 1953 } };
+	{ 1000000, 500000, 250000, 125000 },
+	{ 250000 , 125000, 62500 , 31250  },
+	{ 62500  , 31250 , 15625 , 7812   },
+	{ 15625  , 7812  , 3906  , 1953   } };
 
 /* Constant msleep times for data acquisitions */
 static const int mcp3422_read_times[4] = {
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
index b9666f2..fabd24e 100644
--- a/drivers/iio/adc/qcom-spmi-iadc.c
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -296,7 +296,8 @@
 	if (iadc->poll_eoc) {
 		ret = iadc_poll_wait_eoc(iadc, wait);
 	} else {
-		ret = wait_for_completion_timeout(&iadc->complete, wait);
+		ret = wait_for_completion_timeout(&iadc->complete,
+			usecs_to_jiffies(wait));
 		if (!ret)
 			ret = -ETIMEDOUT;
 		else
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 52d7043..55a90082 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -640,6 +640,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int ssp_suspend(struct device *dev)
 {
 	int ret;
@@ -688,6 +689,7 @@
 
 	return 0;
 }
+#endif /* CONFIG_PM_SLEEP */
 
 static const struct dev_pm_ops ssp_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume)
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index f57562a..15c73e2 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -322,7 +322,7 @@
 	st = iio_priv(indio_dev);
 	spi_set_drvdata(spi, indio_dev);
 
-	st->reg = devm_regulator_get(&spi->dev, "vcc");
+	st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
 	if (!IS_ERR(st->reg)) {
 		ret = regulator_enable(st->reg);
 		if (ret)
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 623c145..7d79a1a 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/bitops.h>
 #include <linux/completion.h>
+#include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
@@ -39,8 +40,12 @@
 
 #define DHT11_DATA_VALID_TIME	2000000000  /* 2s in ns */
 
-#define DHT11_EDGES_PREAMBLE 4
+#define DHT11_EDGES_PREAMBLE 2
 #define DHT11_BITS_PER_READ 40
+/*
+ * Note that 84 edges are actually detected when reading the sensor, but
+ * since the last edge is not significant, we only store 83:
+ */
 #define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1)
 
 /* Data transmission timing (nano seconds) */
@@ -57,6 +62,7 @@
 	int				irq;
 
 	struct completion		completion;
+	struct mutex			lock;
 
 	s64				timestamp;
 	int				temperature;
@@ -88,7 +94,7 @@
 	unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
 
 	/* Calculate timestamp resolution */
-	for (i = 0; i < dht11->num_edges; ++i) {
+	for (i = 1; i < dht11->num_edges; ++i) {
 		t = dht11->edges[i].ts - dht11->edges[i-1].ts;
 		if (t > 0 && t < timeres)
 			timeres = t;
@@ -138,6 +144,27 @@
 	return 0;
 }
 
+/*
+ * IRQ handler called on GPIO edges
+ */
+static irqreturn_t dht11_handle_irq(int irq, void *data)
+{
+	struct iio_dev *iio = data;
+	struct dht11 *dht11 = iio_priv(iio);
+
+	/* TODO: Consider making the handler safe for IRQ sharing */
+	if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
+		dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
+		dht11->edges[dht11->num_edges++].value =
+						gpio_get_value(dht11->gpio);
+
+		if (dht11->num_edges >= DHT11_EDGES_PER_READ)
+			complete(&dht11->completion);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static int dht11_read_raw(struct iio_dev *iio_dev,
 			const struct iio_chan_spec *chan,
 			int *val, int *val2, long m)
@@ -145,6 +172,7 @@
 	struct dht11 *dht11 = iio_priv(iio_dev);
 	int ret;
 
+	mutex_lock(&dht11->lock);
 	if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) {
 		reinit_completion(&dht11->completion);
 
@@ -157,8 +185,17 @@
 		if (ret)
 			goto err;
 
+		ret = request_irq(dht11->irq, dht11_handle_irq,
+				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				  iio_dev->name, iio_dev);
+		if (ret)
+			goto err;
+
 		ret = wait_for_completion_killable_timeout(&dht11->completion,
 								 HZ);
+
+		free_irq(dht11->irq, iio_dev);
+
 		if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
 			dev_err(&iio_dev->dev,
 					"Only %d signal edges detected\n",
@@ -185,6 +222,7 @@
 		ret = -EINVAL;
 err:
 	dht11->num_edges = -1;
+	mutex_unlock(&dht11->lock);
 	return ret;
 }
 
@@ -193,27 +231,6 @@
 	.read_raw		= dht11_read_raw,
 };
 
-/*
- * IRQ handler called on GPIO edges
-*/
-static irqreturn_t dht11_handle_irq(int irq, void *data)
-{
-	struct iio_dev *iio = data;
-	struct dht11 *dht11 = iio_priv(iio);
-
-	/* TODO: Consider making the handler safe for IRQ sharing */
-	if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
-		dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
-		dht11->edges[dht11->num_edges++].value =
-						gpio_get_value(dht11->gpio);
-
-		if (dht11->num_edges >= DHT11_EDGES_PER_READ)
-			complete(&dht11->completion);
-	}
-
-	return IRQ_HANDLED;
-}
-
 static const struct iio_chan_spec dht11_chan_spec[] = {
 	{ .type = IIO_TEMP,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), },
@@ -256,11 +273,6 @@
 		dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio);
 		return -EINVAL;
 	}
-	ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq,
-				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-				pdev->name, iio);
-	if (ret)
-		return ret;
 
 	dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1;
 	dht11->num_edges = -1;
@@ -268,6 +280,7 @@
 	platform_set_drvdata(pdev, iio);
 
 	init_completion(&dht11->completion);
+	mutex_init(&dht11->lock);
 	iio->name = pdev->name;
 	iio->dev.parent = &pdev->dev;
 	iio->info = &dht11_iio_info;
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index b541646..fa3b809 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -45,12 +45,12 @@
 			   struct iio_chan_spec const *chan, int *val,
 			   int *val2, long mask)
 {
-	struct i2c_client *client = iio_priv(indio_dev);
+	struct i2c_client **client = iio_priv(indio_dev);
 	int ret;
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		ret = i2c_smbus_read_word_data(client,
+		ret = i2c_smbus_read_word_data(*client,
 					       chan->type == IIO_TEMP ?
 					       SI7020CMD_TEMP_HOLD :
 					       SI7020CMD_RH_HOLD);
@@ -126,7 +126,7 @@
 	/* Wait the maximum power-up time after software reset. */
 	msleep(15);
 
-	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*client));
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
 	if (!indio_dev)
 		return -ENOMEM;
 
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index b70873d..fa795dc 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/debugfs.h>
+#include <linux/bitops.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -414,7 +415,7 @@
 		mutex_unlock(&indio_dev->mlock);
 		if (ret)
 			return ret;
-		val16 = ((val16 & 0xFFF) << 4) >> 4;
+		val16 = sign_extend32(val16, 11);
 		*val = val16;
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_OFFSET:
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index f73e60b..d8d5bed 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -780,7 +780,11 @@
 
 	i2c_set_clientdata(client, indio_dev);
 	indio_dev->dev.parent = &client->dev;
-	indio_dev->name = id->name;
+	/* id will be NULL when enumerated via ACPI */
+	if (id)
+		indio_dev->name = (char *)id->name;
+	else
+		indio_dev->name = (char *)dev_name(&client->dev);
 	indio_dev->channels = inv_mpu_channels;
 	indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
 
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index ae68c64..a224afd 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -73,6 +73,7 @@
 config GP2AP020A00F
 	tristate "Sharp GP2AP020A00F Proximity/ALS sensor"
 	depends on I2C
+	select REGMAP_I2C
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
 	select IRQ_WORK
@@ -126,6 +127,7 @@
 config JSA1212
 	tristate "JSA1212 ALS and proximity sensor driver"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	 Say Y here if you want to build a IIO driver for JSA1212
 	 proximity & ALS sensor device.
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 4c7a4c5..a5d6de7 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -18,6 +18,8 @@
 
 config AK09911
 	tristate "Asahi Kasei AK09911 3-axis Compass"
+	depends on I2C
+	depends on GPIOLIB
 	select AK8975
 	help
 	  Deprecated: AK09911 is now supported by AK8975 driver.
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 56a4b7c..45d67e9 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1124,6 +1124,9 @@
 	if (!optlen)
 		return -EINVAL;
 
+	memset(&sa_path, 0, sizeof(sa_path));
+	sa_path.vlan_id = 0xffff;
+
 	ib_sa_unpack_path(path_data->path_rec, &sa_path);
 	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
 	if (ret)
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 6095872..8b8cc6f 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -294,7 +294,8 @@
 	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
 		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
 				   &context->umem_tree);
-	if (likely(!atomic_read(&context->notifier_count)))
+	if (likely(!atomic_read(&context->notifier_count)) ||
+	    context->odp_mrs_count == 1)
 		umem->odp_data->mn_counters_active = true;
 	else
 		list_add(&umem->odp_data->no_private_counters,
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 643c08a..b716b08 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -258,5 +258,6 @@
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
+IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b7943ff..a9f0489 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -400,6 +400,52 @@
 	return ret;
 }
 
+static void copy_query_dev_fields(struct ib_uverbs_file *file,
+				  struct ib_uverbs_query_device_resp *resp,
+				  struct ib_device_attr *attr)
+{
+	resp->fw_ver		= attr->fw_ver;
+	resp->node_guid		= file->device->ib_dev->node_guid;
+	resp->sys_image_guid	= attr->sys_image_guid;
+	resp->max_mr_size	= attr->max_mr_size;
+	resp->page_size_cap	= attr->page_size_cap;
+	resp->vendor_id		= attr->vendor_id;
+	resp->vendor_part_id	= attr->vendor_part_id;
+	resp->hw_ver		= attr->hw_ver;
+	resp->max_qp		= attr->max_qp;
+	resp->max_qp_wr		= attr->max_qp_wr;
+	resp->device_cap_flags	= attr->device_cap_flags;
+	resp->max_sge		= attr->max_sge;
+	resp->max_sge_rd	= attr->max_sge_rd;
+	resp->max_cq		= attr->max_cq;
+	resp->max_cqe		= attr->max_cqe;
+	resp->max_mr		= attr->max_mr;
+	resp->max_pd		= attr->max_pd;
+	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
+	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
+	resp->max_res_rd_atom	= attr->max_res_rd_atom;
+	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
+	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
+	resp->atomic_cap		= attr->atomic_cap;
+	resp->max_ee			= attr->max_ee;
+	resp->max_rdd			= attr->max_rdd;
+	resp->max_mw			= attr->max_mw;
+	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
+	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
+	resp->max_mcast_grp		= attr->max_mcast_grp;
+	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
+	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
+	resp->max_ah			= attr->max_ah;
+	resp->max_fmr			= attr->max_fmr;
+	resp->max_map_per_fmr		= attr->max_map_per_fmr;
+	resp->max_srq			= attr->max_srq;
+	resp->max_srq_wr		= attr->max_srq_wr;
+	resp->max_srq_sge		= attr->max_srq_sge;
+	resp->max_pkeys			= attr->max_pkeys;
+	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
+	resp->phys_port_cnt		= file->device->ib_dev->phys_port_cnt;
+}
+
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
 			       const char __user *buf,
 			       int in_len, int out_len)
@@ -420,47 +466,7 @@
 		return ret;
 
 	memset(&resp, 0, sizeof resp);
-
-	resp.fw_ver 		       = attr.fw_ver;
-	resp.node_guid 		       = file->device->ib_dev->node_guid;
-	resp.sys_image_guid 	       = attr.sys_image_guid;
-	resp.max_mr_size 	       = attr.max_mr_size;
-	resp.page_size_cap 	       = attr.page_size_cap;
-	resp.vendor_id 		       = attr.vendor_id;
-	resp.vendor_part_id 	       = attr.vendor_part_id;
-	resp.hw_ver 		       = attr.hw_ver;
-	resp.max_qp 		       = attr.max_qp;
-	resp.max_qp_wr 		       = attr.max_qp_wr;
-	resp.device_cap_flags 	       = attr.device_cap_flags;
-	resp.max_sge 		       = attr.max_sge;
-	resp.max_sge_rd 	       = attr.max_sge_rd;
-	resp.max_cq 		       = attr.max_cq;
-	resp.max_cqe 		       = attr.max_cqe;
-	resp.max_mr 		       = attr.max_mr;
-	resp.max_pd 		       = attr.max_pd;
-	resp.max_qp_rd_atom 	       = attr.max_qp_rd_atom;
-	resp.max_ee_rd_atom 	       = attr.max_ee_rd_atom;
-	resp.max_res_rd_atom 	       = attr.max_res_rd_atom;
-	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
-	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
-	resp.atomic_cap 	       = attr.atomic_cap;
-	resp.max_ee 		       = attr.max_ee;
-	resp.max_rdd 		       = attr.max_rdd;
-	resp.max_mw 		       = attr.max_mw;
-	resp.max_raw_ipv6_qp 	       = attr.max_raw_ipv6_qp;
-	resp.max_raw_ethy_qp 	       = attr.max_raw_ethy_qp;
-	resp.max_mcast_grp 	       = attr.max_mcast_grp;
-	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
-	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
-	resp.max_ah 		       = attr.max_ah;
-	resp.max_fmr 		       = attr.max_fmr;
-	resp.max_map_per_fmr 	       = attr.max_map_per_fmr;
-	resp.max_srq 		       = attr.max_srq;
-	resp.max_srq_wr 	       = attr.max_srq_wr;
-	resp.max_srq_sge 	       = attr.max_srq_sge;
-	resp.max_pkeys 		       = attr.max_pkeys;
-	resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
-	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;
+	copy_query_dev_fields(file, &resp, &attr);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 			 &resp, sizeof resp))
@@ -2091,20 +2097,21 @@
 	if (qp->real_qp == qp) {
 		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
 		if (ret)
-			goto out;
+			goto release_qp;
 		ret = qp->device->modify_qp(qp, attr,
 			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
 	} else {
 		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
 	}
 
-	put_qp_read(qp);
-
 	if (ret)
-		goto out;
+		goto release_qp;
 
 	ret = in_len;
 
+release_qp:
+	put_qp_read(qp);
+
 out:
 	kfree(attr);
 
@@ -3287,3 +3294,64 @@
 
 	return ret ? ret : in_len;
 }
+
+int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
+			      struct ib_udata *ucore,
+			      struct ib_udata *uhw)
+{
+	struct ib_uverbs_ex_query_device_resp resp;
+	struct ib_uverbs_ex_query_device  cmd;
+	struct ib_device_attr attr;
+	struct ib_device *device;
+	int err;
+
+	device = file->device->ib_dev;
+	if (ucore->inlen < sizeof(cmd))
+		return -EINVAL;
+
+	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+	if (err)
+		return err;
+
+	if (cmd.comp_mask)
+		return -EINVAL;
+
+	if (cmd.reserved)
+		return -EINVAL;
+
+	resp.response_length = offsetof(typeof(resp), odp_caps);
+
+	if (ucore->outlen < resp.response_length)
+		return -ENOSPC;
+
+	err = device->query_device(device, &attr);
+	if (err)
+		return err;
+
+	copy_query_dev_fields(file, &resp.base, &attr);
+	resp.comp_mask = 0;
+
+	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
+		goto end;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
+	resp.odp_caps.per_transport_caps.rc_odp_caps =
+		attr.odp_caps.per_transport_caps.rc_odp_caps;
+	resp.odp_caps.per_transport_caps.uc_odp_caps =
+		attr.odp_caps.per_transport_caps.uc_odp_caps;
+	resp.odp_caps.per_transport_caps.ud_odp_caps =
+		attr.odp_caps.per_transport_caps.ud_odp_caps;
+	resp.odp_caps.reserved = 0;
+#else
+	memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
+#endif
+	resp.response_length += sizeof(resp.odp_caps);
+
+end:
+	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
+	if (err)
+		return err;
+
+	return 0;
+}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5db1a8c..259dcc7 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -123,6 +123,7 @@
 				    struct ib_udata *uhw) = {
 	[IB_USER_VERBS_EX_CMD_CREATE_FLOW]	= ib_uverbs_ex_create_flow,
 	[IB_USER_VERBS_EX_CMD_DESTROY_FLOW]	= ib_uverbs_ex_destroy_flow,
+	[IB_USER_VERBS_EX_CMD_QUERY_DEVICE]	= ib_uverbs_ex_query_device,
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 794555d..bdfac2c 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -225,13 +225,20 @@
 	struct c4iw_cq *chp;
 	unsigned long flag;
 
+	spin_lock_irqsave(&dev->lock, flag);
 	chp = get_chp(dev, qid);
 	if (chp) {
+		atomic_inc(&chp->refcnt);
+		spin_unlock_irqrestore(&dev->lock, flag);
 		t4_clear_cq_armed(&chp->cq);
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-	} else
+		if (atomic_dec_and_test(&chp->refcnt))
+			wake_up(&chp->wait);
+	} else {
 		PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+		spin_unlock_irqrestore(&dev->lock, flag);
+	}
 	return 0;
 }
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index b5678ac..d87e165 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -196,7 +196,7 @@
 	return (int)(rdev->lldi.vr->stag.size >> 5);
 }
 
-#define C4IW_WR_TO (30*HZ)
+#define C4IW_WR_TO (60*HZ)
 
 struct c4iw_wr_wait {
 	struct completion completion;
@@ -220,22 +220,21 @@
 				 u32 hwtid, u32 qpid,
 				 const char *func)
 {
-	unsigned to = C4IW_WR_TO;
 	int ret;
 
-	do {
-		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
-		if (!ret) {
-			printk(KERN_ERR MOD "%s - Device %s not responding - "
-			       "tid %u qpid %u\n", func,
-			       pci_name(rdev->lldi.pdev), hwtid, qpid);
-			if (c4iw_fatal_error(rdev)) {
-				wr_waitp->ret = -EIO;
-				break;
-			}
-			to = to << 2;
-		}
-	} while (!ret);
+	if (c4iw_fatal_error(rdev)) {
+		wr_waitp->ret = -EIO;
+		goto out;
+	}
+
+	ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
+	if (!ret) {
+		PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+		     func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+		rdev->flags |= T4_FATAL_ERROR;
+		wr_waitp->ret = -EIO;
+	}
+out:
 	if (wr_waitp->ret)
 		PDBG("%s: FW reply %d tid %u qpid %u\n",
 		     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 4977082..33c45df 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -277,7 +277,7 @@
 	}
 
 	spin_lock(&tmp->d_lock);
-	if (!(d_unhashed(tmp) && tmp->d_inode)) {
+	if (!d_unhashed(tmp) && tmp->d_inode) {
 		dget_dlock(tmp);
 		__d_drop(tmp);
 		spin_unlock(&tmp->d_lock);
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 6559af6..e08db70 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -908,9 +908,6 @@
 /* clean up any chip type-specific stuff */
 void ipath_chip_done(void);
 
-/* check to see if we have to force ordering for write combining */
-int ipath_unordered_wc(void);
-
 void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
 			  unsigned cnt);
 void ipath_cancel_sends(struct ipath_devdata *, int);
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
index 1d7bd82..1a7e20a 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
@@ -47,16 +47,3 @@
 {
 	return 0;
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write
- * combining mmio writes in the most efficient way, we need to
- * know if we are on a processor that may reorder stores when
- * write combining.
- */
-int ipath_unordered_wc(void)
-{
-	return 1;
-}
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
index 3428acb..4ad0b93 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
@@ -167,18 +167,3 @@
 		dd->ipath_wc_cookie = 0; /* even on failure */
 	}
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is ordered
- *
- * Because our performance depends on our ability to do write combining mmio
- * writes in the most efficient way, we need to know if we are on an Intel
- * or AMD x86_64 processor.  AMD x86_64 processors flush WC buffers out in
- * the order completed, and so no special flushing is required to get
- * correct ordering.  Intel processors, however, will flush write buffers
- * out in "random" orders, and so explicit ordering is needed at times.
- */
-int ipath_unordered_wc(void)
-{
-	return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-}
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 56a593e..39a4888 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -372,7 +372,7 @@
 		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
 		if (*slave < 0) {
 			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
-					gid.global.interface_id);
+				     be64_to_cpu(gid.global.interface_id));
 			return -ENOENT;
 		}
 		return 0;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 543ecdd..0176caa 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -369,8 +369,7 @@
 	int err;
 
 	mutex_lock(&cq->resize_mutex);
-
-	if (entries < 1) {
+	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -381,7 +380,7 @@
 		goto out;
 	}
 
-	if (entries > dev->dev->caps.max_cqes) {
+	if (entries > dev->dev->caps.max_cqes + 1) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -394,7 +393,7 @@
 		/* Can't be smaller than the number of outstanding CQEs */
 		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
 		if (entries < outst_cqe + 1) {
-			err = 0;
+			err = -EINVAL;
 			goto out;
 		}
 
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c761971..5904026 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -64,6 +64,14 @@
 #define GUID_TBL_BLK_NUM_ENTRIES 8
 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
 
+/* Counters should saturate once they reach their maximum value */
+#define ASSIGN_32BIT_COUNTER(counter, value) do {\
+	if ((value) > U32_MAX)			 \
+		counter = cpu_to_be32(U32_MAX); \
+	else					 \
+		counter = cpu_to_be32(value);	 \
+} while (0)
+
 struct mlx4_mad_rcv_buf {
 	struct ib_grh grh;
 	u8 payload[256];
@@ -806,10 +814,14 @@
 static void edit_counter(struct mlx4_counter *cnt,
 					struct ib_pma_portcounters *pma_cnt)
 {
-	pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
-	pma_cnt->port_rcv_data  = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
-	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
-	pma_cnt->port_rcv_packets  = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+			     (be64_to_cpu(cnt->tx_bytes) >> 2));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+			     (be64_to_cpu(cnt->rx_bytes) >> 2));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+			     be64_to_cpu(cnt->tx_frames));
+	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+			     be64_to_cpu(cnt->rx_frames));
 }
 
 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index eb8e215f..b972c0b 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1269,8 +1269,7 @@
 	struct mlx4_dev	*dev = mdev->dev;
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
 	struct mlx4_ib_steering *ib_steering = NULL;
-	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
 	struct mlx4_flow_reg_id	reg_id;
 
 	if (mdev->dev->caps.steering_mode ==
@@ -1284,8 +1283,10 @@
 				    !!(mqp->flags &
 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
 				    prot, &reg_id.id);
-	if (err)
+	if (err) {
+		pr_err("multicast attach op failed, err %d\n", err);
 		goto err_malloc;
+	}
 
 	reg_id.mirror = 0;
 	if (mlx4_is_bonded(dev)) {
@@ -1348,9 +1349,7 @@
 	struct net_device *ndev;
 	struct mlx4_ib_gid_entry *ge;
 	struct mlx4_flow_reg_id reg_id = {0, 0};
-
-	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -2698,8 +2697,12 @@
 	spin_lock_bh(&ibdev->iboe.lock);
 	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
 		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+		enum ib_port_state curr_port_state;
 
-		enum ib_port_state curr_port_state =
+		if (!curr_netdev)
+			continue;
+
+		curr_port_state =
 			(netif_running(curr_netdev) &&
 			 netif_carrier_ok(curr_netdev)) ?
 			IB_PORT_ACTIVE : IB_PORT_DOWN;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index dfc6ca1..ed2bd67 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1696,8 +1696,10 @@
 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
 				err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
-				if (err)
-					return -EINVAL;
+				if (err) {
+					err = -EINVAL;
+					goto out;
+				}
 				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
 					dev->qp1_proxy[qp->port - 1] = qp;
 			}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 03bf812..cc4ac1e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -997,7 +997,7 @@
 	struct ib_device_attr *dprops = NULL;
 	struct ib_port_attr *pprops = NULL;
 	struct mlx5_general_caps *gen;
-	int err = 0;
+	int err = -ENOMEM;
 	int port;
 
 	gen = &dev->mdev->caps.gen;
@@ -1331,6 +1331,8 @@
 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
+	dev->ib_dev.uverbs_ex_cmd_mask =
+		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 
 	dev->ib_dev.query_device	= mlx5_ib_query_device;
 	dev->ib_dev.query_port		= mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 32a28bd..cd9822e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1012,6 +1012,7 @@
 		goto err_2;
 	}
 	mr->umem = umem;
+	mr->dev = dev;
 	mr->live = 1;
 	kvfree(in);
 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b43456a..c9780d9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -40,7 +40,7 @@
 #include <be_roce.h>
 #include "ocrdma_sli.h"
 
-#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
+#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
 
 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -55,12 +55,19 @@
 #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
 #define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
+#define EQ_INTR_PER_SEC_THRSH_HI 150000
+#define EQ_INTR_PER_SEC_THRSH_LOW 100000
+#define EQ_AIC_MAX_EQD 20
+#define EQ_AIC_MIN_EQD 0
+
+void ocrdma_eqd_set_task(struct work_struct *work);
 
 struct ocrdma_dev_attr {
 	u8 fw_ver[32];
 	u32 vendor_id;
 	u32 device_id;
 	u16 max_pd;
+	u16 max_dpp_pds;
 	u16 max_cq;
 	u16 max_cqe;
 	u16 max_qp;
@@ -116,12 +123,19 @@
 	bool created;
 };
 
+struct ocrdma_aic_obj {         /* Adaptive interrupt coalescing (AIC) info */
+	u32 prev_eqd;
+	u64 eq_intr_cnt;
+	u64 prev_eq_intr_cnt;
+};
+
 struct ocrdma_eq {
 	struct ocrdma_queue_info q;
 	u32 vector;
 	int cq_cnt;
 	struct ocrdma_dev *dev;
 	char irq_name[32];
+	struct ocrdma_aic_obj aic_obj;
 };
 
 struct ocrdma_mq {
@@ -171,6 +185,21 @@
 	struct ocrdma_dev *dev;
 };
 
+struct ocrdma_pd_resource_mgr {
+	u32 pd_norm_start;
+	u16 pd_norm_count;
+	u16 pd_norm_thrsh;
+	u16 max_normal_pd;
+	u32 pd_dpp_start;
+	u16 pd_dpp_count;
+	u16 pd_dpp_thrsh;
+	u16 max_dpp_pd;
+	u16 dpp_page_index;
+	unsigned long *pd_norm_bitmap;
+	unsigned long *pd_dpp_bitmap;
+	bool pd_prealloc_valid;
+};
+
 struct stats_mem {
 	struct ocrdma_mqe mqe;
 	void *va;
@@ -198,6 +227,7 @@
 
 	struct ocrdma_eq *eq_tbl;
 	int eq_cnt;
+	struct delayed_work eqd_work;
 	u16 base_eqid;
 	u16 max_eq;
 
@@ -255,7 +285,12 @@
 	struct ocrdma_stats rx_qp_err_stats;
 	struct ocrdma_stats tx_dbg_stats;
 	struct ocrdma_stats rx_dbg_stats;
+	struct ocrdma_stats driver_stats;
+	struct ocrdma_stats reset_stats;
 	struct dentry *dir;
+	atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
+	atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
+	struct ocrdma_pd_resource_mgr *pd_mgr;
 };
 
 struct ocrdma_cq {
@@ -335,7 +370,6 @@
 
 struct ocrdma_qp {
 	struct ib_qp ibqp;
-	struct ocrdma_dev *dev;
 
 	u8 __iomem *sq_db;
 	struct ocrdma_qp_hwq_info sq;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index f3cc8c9..d812904 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,19 +29,22 @@
 #include <net/netevent.h>
 
 #include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
 
 #include "ocrdma.h"
 #include "ocrdma_verbs.h"
 #include "ocrdma_ah.h"
 #include "ocrdma_hw.h"
+#include "ocrdma_stats.h"
 
 #define OCRDMA_VID_PCP_SHIFT	0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-			struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
+			struct ib_ah_attr *attr, union ib_gid *sgid,
+			int pdid, bool *isvlan)
 {
 	int status = 0;
-	u16 vlan_tag; bool vlan_enabled = false;
+	u16 vlan_tag;
 	struct ocrdma_eth_vlan eth;
 	struct ocrdma_grh grh;
 	int eth_sz;
@@ -59,7 +62,7 @@
 		vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
 		eth.vlan_tag = cpu_to_be16(vlan_tag);
 		eth_sz = sizeof(struct ocrdma_eth_vlan);
-		vlan_enabled = true;
+		*isvlan = true;
 	} else {
 		eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
 		eth_sz = sizeof(struct ocrdma_eth_basic);
@@ -82,7 +85,7 @@
 	/* Eth HDR */
 	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
 	memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
-	if (vlan_enabled)
+	if (*isvlan)
 		ah->av->valid |= OCRDMA_AV_VLAN_VALID;
 	ah->av->valid = cpu_to_le32(ah->av->valid);
 	return status;
@@ -91,6 +94,7 @@
 struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 {
 	u32 *ahid_addr;
+	bool isvlan = false;
 	int status;
 	struct ocrdma_ah *ah;
 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
@@ -127,15 +131,20 @@
 		}
 	}
 
-	status = set_av_attr(dev, ah, attr, &sgid, pd->id);
+	status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan);
 	if (status)
 		goto av_conf_err;
 
 	/* if pd is for the user process, pass the ah_id to user space */
 	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
 		ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
-		*ahid_addr = ah->id;
+		*ahid_addr = 0;
+		*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
+		if (isvlan)
+			*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
+				       OCRDMA_AH_VLAN_VALID_SHIFT);
 	}
+
 	return &ah->ibah;
 
 av_conf_err:
@@ -191,5 +200,20 @@
 		       struct ib_grh *in_grh,
 		       struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	return IB_MAD_RESULT_SUCCESS;
+	int status;
+	struct ocrdma_dev *dev;
+
+	switch (in_mad->mad_hdr.mgmt_class) {
+	case IB_MGMT_CLASS_PERF_MGMT:
+		dev = get_ocrdma_dev(ibdev);
+		if (!ocrdma_pma_counters(dev, out_mad))
+			status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+		else
+			status = IB_MAD_RESULT_SUCCESS;
+		break;
+	default:
+		status = IB_MAD_RESULT_SUCCESS;
+		break;
+	}
+	return status;
 }
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 8ac49e7..726a87c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -28,6 +28,12 @@
 #ifndef __OCRDMA_AH_H__
 #define __OCRDMA_AH_H__
 
+enum {
+	OCRDMA_AH_ID_MASK		= 0x3FF,
+	OCRDMA_AH_VLAN_VALID_MASK	= 0x01,
+	OCRDMA_AH_VLAN_VALID_SHIFT	= 0x1F
+};
+
 struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
 int ocrdma_destroy_ah(struct ib_ah *);
 int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 638bff1..0c9e959 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -734,6 +734,9 @@
 		break;
 	}
 
+	if (type < OCRDMA_MAX_ASYNC_ERRORS)
+		atomic_inc(&dev->async_err_stats[type]);
+
 	if (qp_event) {
 		if (qp->ibqp.event_handler)
 			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
@@ -831,20 +834,20 @@
 	return 0;
 }
 
-static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
-				       struct ocrdma_cq *cq)
+static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+				struct ocrdma_cq *cq, bool sq)
 {
-	unsigned long flags;
 	struct ocrdma_qp *qp;
-	bool buddy_cq_found = false;
-	/* Go through list of QPs in error state which are using this CQ
-	 * and invoke its callback handler to trigger CQE processing for
-	 * error/flushed CQE. It is rare to find more than few entries in
-	 * this list as most consumers stops after getting error CQE.
-	 * List is traversed only once when a matching buddy cq found for a QP.
-	 */
-	spin_lock_irqsave(&dev->flush_q_lock, flags);
-	list_for_each_entry(qp, &cq->sq_head, sq_entry) {
+	struct list_head *cur;
+	struct ocrdma_cq *bcq = NULL;
+	struct list_head *head = sq ? (&cq->sq_head) : (&cq->rq_head);
+
+	list_for_each(cur, head) {
+		if (sq)
+			qp = list_entry(cur, struct ocrdma_qp, sq_entry);
+		else
+			qp = list_entry(cur, struct ocrdma_qp, rq_entry);
+
 		if (qp->srq)
 			continue;
 		/* if wq and rq share the same cq, than comp_handler
@@ -856,19 +859,41 @@
 		 * if completion came on rq, sq's cq is buddy cq.
 		 */
 		if (qp->sq_cq == cq)
-			cq = qp->rq_cq;
+			bcq = qp->rq_cq;
 		else
-			cq = qp->sq_cq;
-		buddy_cq_found = true;
-		break;
+			bcq = qp->sq_cq;
+		return bcq;
 	}
+	return NULL;
+}
+
+static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+				       struct ocrdma_cq *cq)
+{
+	unsigned long flags;
+	struct ocrdma_cq *bcq = NULL;
+
+	/* Go through the list of QPs in error state which are using this CQ
+	 * and invoke its callback handler to trigger CQE processing for
+	 * error/flushed CQEs. It is rare to find more than a few entries in
+	 * this list as most consumers stop after getting an error CQE.
+	 * The list is traversed only once when a matching buddy cq is found.
+	 */
+	spin_lock_irqsave(&dev->flush_q_lock, flags);
+	/* Check if a buddy CQ is present.
+	 * true  - check for the SQ CQ
+	 * false - check for the RQ CQ
+	 */
+	bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
+	if (bcq == NULL)
+		bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
-	if (buddy_cq_found == false)
-		return;
-	if (cq->ibcq.comp_handler) {
-		spin_lock_irqsave(&cq->comp_handler_lock, flags);
-		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+
+	/* if there is a valid buddy cq, look for its completion handler */
+	if (bcq && bcq->ibcq.comp_handler) {
+		spin_lock_irqsave(&bcq->comp_handler_lock, flags);
+		(*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
+		spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
 	}
 }
 
@@ -935,6 +960,7 @@
 
 	} while (budget);
 
+	eq->aic_obj.eq_intr_cnt++;
 	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
 	return IRQ_HANDLED;
 }
@@ -1050,6 +1076,9 @@
 	attr->max_pd =
 	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+	attr->max_dpp_pds =
+	   (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
 	attr->max_qp =
 	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
@@ -1396,6 +1425,122 @@
 	return status;
 }
 
+
+static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
+{
+	int status = -ENOMEM;
+	size_t pd_bitmap_size;
+	struct ocrdma_alloc_pd_range *cmd;
+	struct ocrdma_alloc_pd_range_rsp *rsp;
+
+	/* Pre allocate the DPP PDs */
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+	cmd->pd_count = dev->attr.max_dpp_pds;
+	cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+	if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
+		dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+		dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+				OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+		dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+		dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
+						     GFP_KERNEL);
+	}
+	kfree(cmd);
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+	if (rsp->pd_count) {
+		dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
+					OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+		dev->pd_mgr->max_normal_pd = rsp->pd_count;
+		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+		dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
+						      GFP_KERNEL);
+	}
+
+	if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
+		/* Enable PD resource manager */
+		dev->pd_mgr->pd_prealloc_valid = true;
+	} else {
+		status = -ENOMEM;
+	}
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
+{
+	struct ocrdma_dealloc_pd_range *cmd;
+
+	/* return normal PDs to firmware */
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		goto mbx_err;
+
+	if (dev->pd_mgr->max_normal_pd) {
+		cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
+		cmd->pd_count = dev->pd_mgr->max_normal_pd;
+		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	}
+
+	if (dev->pd_mgr->max_dpp_pd) {
+		kfree(cmd);
+		/* return DPP PDs to firmware */
+		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
+					  sizeof(*cmd));
+		if (!cmd)
+			goto mbx_err;
+
+		cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
+		cmd->pd_count = dev->pd_mgr->max_dpp_pd;
+		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	}
+mbx_err:
+	kfree(cmd);
+}
+
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
+{
+	int status;
+
+	dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
+			      GFP_KERNEL);
+	if (!dev->pd_mgr) {
+		pr_err("%s(%d) Memory allocation failure.\n", __func__, dev->id);
+		return;
+	}
+	status = ocrdma_mbx_alloc_pd_range(dev);
+	if (status) {
+		pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
+			 __func__, dev->id);
+	}
+}
+
+static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
+{
+	ocrdma_mbx_dealloc_pd_range(dev);
+	kfree(dev->pd_mgr->pd_norm_bitmap);
+	kfree(dev->pd_mgr->pd_dpp_bitmap);
+	kfree(dev->pd_mgr);
+}
+
 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
 			       int *num_pages, int *page_size)
 {
@@ -1896,8 +2041,9 @@
 {
 	bool found;
 	unsigned long flags;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
-	spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
+	spin_lock_irqsave(&dev->flush_q_lock, flags);
 	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
 	if (!found)
 		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -1906,7 +2052,7 @@
 		if (!found)
 			list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
 	}
-	spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
+	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 
 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -1972,7 +2118,8 @@
 	int status;
 	u32 len, hw_pages, hw_page_size;
 	dma_addr_t pa;
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 max_wqe_allocated;
 	u32 max_sges = attrs->cap.max_send_sge;
@@ -2027,7 +2174,8 @@
 	int status;
 	u32 len, hw_pages, hw_page_size;
 	dma_addr_t pa = 0;
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
 
@@ -2086,7 +2234,8 @@
 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
 					struct ocrdma_qp *qp)
 {
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	dma_addr_t pa = 0;
 	int ird_page_size = dev->attr.ird_page_size;
@@ -2157,8 +2306,8 @@
 {
 	int status = -ENOMEM;
 	u32 flags = 0;
-	struct ocrdma_dev *dev = qp->dev;
 	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	struct ocrdma_cq *cq;
 	struct ocrdma_create_qp_req *cmd;
@@ -2281,11 +2430,12 @@
 	union ib_gid sgid, zgid;
 	u32 vlan_id;
 	u8 mac_addr[6];
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
 	if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
 		return -EINVAL;
-	if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
-		ocrdma_init_service_level(qp->dev);
+	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+		ocrdma_init_service_level(dev);
 	cmd->params.tclass_sq_psn |=
 	    (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
 	cmd->params.rnt_rc_sl_fl |=
@@ -2296,7 +2446,7 @@
 	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
 	memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
 	       sizeof(cmd->params.dgid));
-	status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+	status = ocrdma_query_gid(&dev->ibdev, 1,
 			ah_attr->grh.sgid_index, &sgid);
 	if (status)
 		return status;
@@ -2307,7 +2457,9 @@
 
 	qp->sgid_idx = ah_attr->grh.sgid_index;
 	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
-	ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
+	status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
+	if (status)
+		return status;
 	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
 				(mac_addr[2] << 16) | (mac_addr[3] << 24);
 	/* convert them to LE format. */
@@ -2320,7 +2472,7 @@
 		    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
 		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
 		cmd->params.rnt_rc_sl_fl |=
-			(qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
+			(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
 	}
 	return 0;
 }
@@ -2330,6 +2482,7 @@
 				struct ib_qp_attr *attrs, int attr_mask)
 {
 	int status = 0;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2347,12 +2500,12 @@
 			return status;
 	} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
 		/* set the default mac address for UD, GSI QPs */
-		cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
-			(qp->dev->nic_info.mac_addr[1] << 8) |
-			(qp->dev->nic_info.mac_addr[2] << 16) |
-			(qp->dev->nic_info.mac_addr[3] << 24);
-		cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
-					(qp->dev->nic_info.mac_addr[5] << 8);
+		cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
+			(dev->nic_info.mac_addr[1] << 8) |
+			(dev->nic_info.mac_addr[2] << 16) |
+			(dev->nic_info.mac_addr[3] << 24);
+		cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
+					(dev->nic_info.mac_addr[5] << 8);
 	}
 	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
 	    attrs->en_sqd_async_notify) {
@@ -2409,7 +2562,7 @@
 		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
 	}
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-		if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
+		if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
 			status = -EINVAL;
 			goto pmtu_err;
 		}
@@ -2417,7 +2570,7 @@
 		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
 	}
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-		if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
+		if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
 			status = -EINVAL;
 			goto pmtu_err;
 		}
@@ -2870,6 +3023,82 @@
 	return status;
 }
 
+static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+				 int num)
+{
+	int i, status = -ENOMEM;
+	struct ocrdma_modify_eqd_req *cmd;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
+	if (!cmd)
+		return status;
+
+	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
+			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+	cmd->cmd.num_eq = num;
+	for (i = 0; i < num; i++) {
+		cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
+		cmd->cmd.set_eqd[i].phase = 0;
+		cmd->cmd.set_eqd[i].delay_multiplier =
+				(eq[i].aic_obj.prev_eqd * 65)/100;
+	}
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+			     int num)
+{
+	int num_eqs, i = 0;
+	if (num > 8) {
+		while (num) {
+			num_eqs = min(num, 8);
+			ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
+			i += num_eqs;
+			num -= num_eqs;
+		}
+	} else {
+		ocrdma_mbx_modify_eqd(dev, eq, num);
+	}
+	return 0;
+}
+
+void ocrdma_eqd_set_task(struct work_struct *work)
+{
+	struct ocrdma_dev *dev =
+		container_of(work, struct ocrdma_dev, eqd_work.work);
+	struct ocrdma_eq *eq = NULL;
+	int i, num = 0, status = -EINVAL;
+	u64 eq_intr;
+
+	for (i = 0; i < dev->eq_cnt; i++) {
+		eq = &dev->eq_tbl[i];
+		if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
+			eq_intr = eq->aic_obj.eq_intr_cnt -
+				  eq->aic_obj.prev_eq_intr_cnt;
+			if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
+			    (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
+				eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
+				num++;
+			} else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
+				   (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
+				eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
+				num++;
+			}
+		}
+		eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
+	}
+
+	if (num)
+		status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
+}
+
 int ocrdma_init_hw(struct ocrdma_dev *dev)
 {
 	int status;
@@ -2915,6 +3144,7 @@
 
 void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
 {
+	ocrdma_free_pd_pool(dev);
 	ocrdma_mbx_delete_ah_tbl(dev);
 
 	/* cleanup the eqs */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index 6eed8f19..e905972 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -136,5 +136,7 @@
 int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
 char *port_speed_string(struct ocrdma_dev *dev);
 void ocrdma_init_service_level(struct ocrdma_dev *);
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
+void ocrdma_free_pd_range(struct ocrdma_dev *dev);
 
 #endif				/* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index b0b2257..7a2b59a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -239,7 +239,7 @@
 
 	dev->ibdev.node_type = RDMA_NODE_IB_CA;
 	dev->ibdev.phys_port_cnt = 1;
-	dev->ibdev.num_comp_vectors = 1;
+	dev->ibdev.num_comp_vectors = dev->eq_cnt;
 
 	/* mandatory verbs. */
 	dev->ibdev.query_device = ocrdma_query_device;
@@ -329,6 +329,8 @@
 	if (dev->stag_arr == NULL)
 		goto alloc_err;
 
+	ocrdma_alloc_pd_pool(dev);
+
 	spin_lock_init(&dev->av_tbl.lock);
 	spin_lock_init(&dev->flush_q_lock);
 	return 0;
@@ -491,6 +493,9 @@
 	spin_unlock(&ocrdma_devlist_lock);
 	/* Init stats */
 	ocrdma_add_port_stats(dev);
+	/* Interrupt Moderation */
+	INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
+	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
 
 	pr_info("%s %s: %s \"%s\" port %d\n",
 		dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
@@ -528,11 +533,12 @@
 	/* first unregister with stack to stop all the active traffic
 	 * of the registered clients.
 	 */
-	ocrdma_rem_port_stats(dev);
+	cancel_delayed_work_sync(&dev->eqd_work);
 	ocrdma_remove_sysfiles(dev);
-
 	ib_unregister_device(&dev->ibdev);
 
+	ocrdma_rem_port_stats(dev);
+
 	spin_lock(&ocrdma_devlist_lock);
 	list_del_rcu(&dev->entry);
 	spin_unlock(&ocrdma_devlist_lock);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 4e03648..243c87c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -75,6 +75,8 @@
 	OCRDMA_CMD_DESTROY_RBQ = 26,
 
 	OCRDMA_CMD_GET_RDMA_STATS = 27,
+	OCRDMA_CMD_ALLOC_PD_RANGE = 28,
+	OCRDMA_CMD_DEALLOC_PD_RANGE = 29,
 
 	OCRDMA_CMD_MAX
 };
@@ -87,6 +89,7 @@
 	OCRDMA_CMD_CREATE_MQ		= 21,
 	OCRDMA_CMD_GET_CTRL_ATTRIBUTES  = 32,
 	OCRDMA_CMD_GET_FW_VER		= 35,
+	OCRDMA_CMD_MODIFY_EQ_DELAY      = 41,
 	OCRDMA_CMD_DELETE_MQ		= 53,
 	OCRDMA_CMD_DELETE_CQ		= 54,
 	OCRDMA_CMD_DELETE_EQ		= 55,
@@ -101,7 +104,7 @@
 	QTYPE_MCCQ	= 3
 };
 
-#define OCRDMA_MAX_SGID		8
+#define OCRDMA_MAX_SGID		16
 
 #define OCRDMA_MAX_QP    2048
 #define OCRDMA_MAX_CQ    2048
@@ -314,6 +317,29 @@
 
 #define OCRDMA_EQ_MINOR_OTHER	0x1
 
+struct ocrmda_set_eqd {
+	u32 eq_id;
+	u32 phase;
+	u32 delay_multiplier;
+};
+
+struct ocrdma_modify_eqd_cmd {
+	struct ocrdma_mbx_hdr req;
+	u32 num_eq;
+	struct ocrmda_set_eqd set_eqd[8];
+} __packed;
+
+struct ocrdma_modify_eqd_req {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_modify_eqd_cmd cmd;
+};
+
+
+struct ocrdma_modify_eq_delay_rsp {
+	struct ocrdma_mbx_rsp hdr;
+	u32 rsvd0;
+} __packed;
+
 enum {
 	OCRDMA_MCQE_STATUS_SHIFT	= 0,
 	OCRDMA_MCQE_STATUS_MASK		= 0xFFFF,
@@ -441,7 +467,9 @@
 	OCRDMA_DEVICE_FATAL_EVENT	= 0x08,
 	OCRDMA_SRQCAT_ERROR		= 0x0E,
 	OCRDMA_SRQ_LIMIT_EVENT		= 0x0F,
-	OCRDMA_QP_LAST_WQE_EVENT	= 0x10
+	OCRDMA_QP_LAST_WQE_EVENT	= 0x10,
+
+	OCRDMA_MAX_ASYNC_ERRORS
 };
 
 /* mailbox command request and responses */
@@ -1297,6 +1325,37 @@
 	struct ocrdma_mbx_rsp rsp;
 };
 
+struct ocrdma_alloc_pd_range {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+	u32 enable_dpp_rsvd;
+	u32 pd_count;
+};
+
+struct ocrdma_alloc_pd_range_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+	u32 dpp_page_pdid;
+	u32 pd_count;
+};
+
+enum {
+	OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF,
+};
+
+struct ocrdma_dealloc_pd_range {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+	u32 start_pd_id;
+	u32 pd_count;
+};
+
+struct ocrdma_dealloc_pd_range_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+	u32 rsvd;
+};
+
 enum {
 	OCRDMA_ADDR_CHECK_ENABLE	= 1,
 	OCRDMA_ADDR_CHECK_DISABLE	= 0
@@ -1597,7 +1656,9 @@
 	OCRDMA_CQE_INV_EEC_STATE_ERR,
 	OCRDMA_CQE_FATAL_ERR,
 	OCRDMA_CQE_RESP_TIMEOUT_ERR,
-	OCRDMA_CQE_GENERAL_ERR
+	OCRDMA_CQE_GENERAL_ERR,
+
+	OCRDMA_MAX_CQE_ERR
 };
 
 enum {
@@ -1673,6 +1734,7 @@
 	OCRDMA_FLAG_FENCE_R	= 0x8,
 	OCRDMA_FLAG_SOLICIT	= 0x10,
 	OCRDMA_FLAG_IMM		= 0x20,
+	OCRDMA_FLAG_AH_VLAN_PR  = 0x40,
 
 	/* Stag flags */
 	OCRDMA_LKEY_FLAG_LOCAL_WR	= 0x1,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 41a9aec..48d7ef5 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -26,6 +26,7 @@
  *******************************************************************/
 
 #include <rdma/ib_addr.h>
+#include <rdma/ib_pma.h>
 #include "ocrdma_stats.h"
 
 static struct dentry *ocrdma_dbgfs_dir;
@@ -249,6 +250,27 @@
 	return stats;
 }
 
+static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
+{
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+	return convert_to_64bit(rx_stats->roce_frames_lo,
+		rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops
+		+ (u64)rx_stats->roce_frame_payload_len_drops;
+}
+
+static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
+{
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+	return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
+		rx_stats->roce_frame_bytes_hi))/4;
+}
+
 static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
 {
 	char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -292,6 +314,37 @@
 	return stats;
 }
 
+static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
+{
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+	return (convert_to_64bit(tx_stats->send_pkts_lo,
+				 tx_stats->send_pkts_hi) +
+	convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
+	convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
+	convert_to_64bit(tx_stats->read_rsp_pkts_lo,
+			 tx_stats->read_rsp_pkts_hi) +
+	convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
+}
+
+static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
+{
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+	return (convert_to_64bit(tx_stats->send_bytes_lo,
+				 tx_stats->send_bytes_hi) +
+		convert_to_64bit(tx_stats->write_bytes_lo,
+				 tx_stats->write_bytes_hi) +
+		convert_to_64bit(tx_stats->read_req_bytes_lo,
+				 tx_stats->read_req_bytes_hi) +
+		convert_to_64bit(tx_stats->read_rsp_bytes_lo,
+				 tx_stats->read_rsp_bytes_hi))/4;
+}
+
 static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
 {
 	char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -432,10 +485,118 @@
 	return dev->stats_mem.debugfs_mem;
 }
 
+static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
+{
+	char *stats = dev->stats_mem.debugfs_mem, *pcur;
+
+
+	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+	pcur = stats;
+	pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
+				(u64)(dev->async_err_stats
+				[OCRDMA_CQ_ERROR].counter));
+	pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
+				(u64)dev->async_err_stats
+				[OCRDMA_CQ_OVERRUN_ERROR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
+				(u64)dev->async_err_stats
+				[OCRDMA_CQ_QPCAT_ERROR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
+				(u64)dev->async_err_stats
+				[OCRDMA_QP_ACCESS_ERROR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt",
+				(u64)dev->async_err_stats
+				[OCRDMA_QP_COMM_EST_EVENT].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
+				(u64)dev->async_err_stats
+				[OCRDMA_SQ_DRAINED_EVENT].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
+				(u64)dev->async_err_stats
+				[OCRDMA_DEVICE_FATAL_EVENT].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
+				(u64)dev->async_err_stats
+				[OCRDMA_SRQCAT_ERROR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
+				(u64)dev->async_err_stats
+				[OCRDMA_SRQ_LIMIT_EVENT].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
+				(u64)dev->async_err_stats
+				[OCRDMA_QP_LAST_WQE_EVENT].counter);
+
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_LOC_LEN_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_LOC_QP_OP_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_LOC_PROT_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_WR_FLUSH_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_MW_BIND_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_BAD_RESP_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_LOC_ACCESS_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_REM_INV_REQ_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_REM_ACCESS_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_REM_OP_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_RETRY_EXC_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_REM_ABORT_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_INV_EECN_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_FATAL_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
+	pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
+				(u64)dev->cqe_err_stats
+				[OCRDMA_CQE_GENERAL_ERR].counter);
+	return stats;
+}
+
 static void ocrdma_update_stats(struct ocrdma_dev *dev)
 {
 	ulong now = jiffies, secs;
 	int status = 0;
+	struct ocrdma_rdma_stats_resp *rdma_stats =
+		      (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+	struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
 
 	secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
 	if (secs) {
@@ -444,10 +605,74 @@
 		if (status)
 			pr_err("%s: stats mbox failed with status = %d\n",
 			       __func__, status);
+		/* Update PD counters from PD resource manager */
+		if (dev->pd_mgr->pd_prealloc_valid) {
+			rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
+			rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
+			/* Threshold stats */
+			rsrc_stats = &rdma_stats->th_rsrc_stats;
+			rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
+			rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
+		}
 		dev->last_stats_time = jiffies;
 	}
 }
 
+static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
+					const char __user *buffer,
+					size_t count, loff_t *ppos)
+{
+	char tmp_str[32];
+	long reset;
+	int status = 0;
+	struct ocrdma_stats *pstats = filp->private_data;
+	struct ocrdma_dev *dev = pstats->dev;
+
+	if (count > 32)
+		goto err;
+
+	if (copy_from_user(tmp_str, buffer, count))
+		goto err;
+
+	tmp_str[count-1] = '\0';
+	if (kstrtol(tmp_str, 10, &reset))
+		goto err;
+
+	switch (pstats->type) {
+	case OCRDMA_RESET_STATS:
+		if (reset) {
+			status = ocrdma_mbx_rdma_stats(dev, true);
+			if (status) {
+				pr_err("Failed to reset stats = %d", status);
+				goto err;
+			}
+		}
+		break;
+	default:
+		goto err;
+	}
+
+	return count;
+err:
+	return -EFAULT;
+}
+
+int ocrdma_pma_counters(struct ocrdma_dev *dev,
+			struct ib_mad *out_mad)
+{
+	struct ib_pma_portcounters *pma_cnt;
+
+	memset(out_mad->data, 0, sizeof(out_mad->data));
+	pma_cnt = (void *)(out_mad->data + 40);
+	ocrdma_update_stats(dev);
+
+	pma_cnt->port_xmit_data    = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
+	pma_cnt->port_rcv_data     = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
+	pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
+	pma_cnt->port_rcv_packets  = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
+	return 0;
+}
+
 static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
 					size_t usr_buf_len, loff_t *ppos)
 {
@@ -492,6 +717,9 @@
 	case OCRDMA_RX_DBG_STATS:
 		data = ocrdma_rx_dbg_stats(dev);
 		break;
+	case OCRDMA_DRV_STATS:
+		data = ocrdma_driver_dbg_stats(dev);
+		break;
 
 	default:
 		status = -EFAULT;
@@ -514,6 +742,7 @@
 	.owner = THIS_MODULE,
 	.open = simple_open,
 	.read = ocrdma_dbgfs_ops_read,
+	.write = ocrdma_dbgfs_ops_write,
 };
 
 void ocrdma_add_port_stats(struct ocrdma_dev *dev)
@@ -582,6 +811,18 @@
 				 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
 		goto err;
 
+	dev->driver_stats.type = OCRDMA_DRV_STATS;
+	dev->driver_stats.dev = dev;
+	if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
+					&dev->driver_stats, &ocrdma_dbg_ops))
+		goto err;
+
+	dev->reset_stats.type = OCRDMA_RESET_STATS;
+	dev->reset_stats.dev = dev;
+	if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
+				&dev->reset_stats, &ocrdma_dbg_ops))
+		goto err;
+
 	/* Now create dma_mem for stats mbx command */
 	if (!ocrdma_alloc_stats_mem(dev))
 		goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 5f5e20c..091edd6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -43,12 +43,16 @@
 	OCRDMA_RXQP_ERRSTATS,
 	OCRDMA_TXQP_ERRSTATS,
 	OCRDMA_TX_DBG_STATS,
-	OCRDMA_RX_DBG_STATS
+	OCRDMA_RX_DBG_STATS,
+	OCRDMA_DRV_STATS,
+	OCRDMA_RESET_STATS
 };
 
 void ocrdma_rem_debugfs(void);
 void ocrdma_init_debugfs(void);
 void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
 void ocrdma_add_port_stats(struct ocrdma_dev *dev);
+int ocrdma_pma_counters(struct ocrdma_dev *dev,
+			struct ib_mad *out_mad);
 
 #endif	/* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index fb8d8c4..8771755 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@
 
 	dev = get_ocrdma_dev(ibdev);
 	memset(sgid, 0, sizeof(*sgid));
-	if (index > OCRDMA_MAX_SGID)
+	if (index >= OCRDMA_MAX_SGID)
 		return -EINVAL;
 
 	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -253,6 +253,107 @@
 	return found;
 }
 
+
+static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
+{
+	u16 pd_bitmap_idx = 0;
+	const unsigned long *pd_bitmap;
+
+	if (dpp_pool) {
+		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
+		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+						    dev->pd_mgr->max_dpp_pd);
+		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
+		dev->pd_mgr->pd_dpp_count++;
+		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
+			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
+	} else {
+		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
+		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+						    dev->pd_mgr->max_normal_pd);
+		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
+		dev->pd_mgr->pd_norm_count++;
+		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
+			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
+	}
+	return pd_bitmap_idx;
+}
+
+static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
+					bool dpp_pool)
+{
+	u16 pd_count;
+	u16 pd_bit_index;
+
+	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
+			      dev->pd_mgr->pd_norm_count;
+	if (pd_count == 0)
+		return -EINVAL;
+
+	if (dpp_pool) {
+		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
+		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
+			return -EINVAL;
+		} else {
+			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
+			dev->pd_mgr->pd_dpp_count--;
+		}
+	} else {
+		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
+		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
+			return -EINVAL;
+		} else {
+			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
+			dev->pd_mgr->pd_norm_count--;
+		}
+	}
+
+	return 0;
+}
+
+static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
+				   bool dpp_pool)
+{
+	int status;
+
+	mutex_lock(&dev->dev_lock);
+	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
+	mutex_unlock(&dev->dev_lock);
+	return status;
+}
+
+static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+{
+	u16 pd_idx = 0;
+	int status = 0;
+
+	mutex_lock(&dev->dev_lock);
+	if (pd->dpp_enabled) {
+		/* try allocating DPP PD, if not available then normal PD */
+		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
+			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
+			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
+			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
+		} else if (dev->pd_mgr->pd_norm_count <
+			   dev->pd_mgr->max_normal_pd) {
+			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+			pd->dpp_enabled = false;
+		} else {
+			status = -EINVAL;
+		}
+	} else {
+		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
+			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+		} else {
+			status = -EINVAL;
+		}
+	}
+	mutex_unlock(&dev->dev_lock);
+	return status;
+}
+
 static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
 					  struct ocrdma_ucontext *uctx,
 					  struct ib_udata *udata)
@@ -272,6 +373,11 @@
 					   dev->attr.wqe_size) : 0;
 	}
 
+	if (dev->pd_mgr->pd_prealloc_valid) {
+		status = ocrdma_get_pd_num(dev, pd);
+		return (status == 0) ? pd : ERR_PTR(status);
+	}
+
 retry:
 	status = ocrdma_mbx_alloc_pd(dev, pd);
 	if (status) {
@@ -299,7 +405,11 @@
 {
 	int status = 0;
 
-	status = ocrdma_mbx_dealloc_pd(dev, pd);
+	if (dev->pd_mgr->pd_prealloc_valid)
+		status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
+	else
+		status = ocrdma_mbx_dealloc_pd(dev, pd);
+
 	kfree(pd);
 	return status;
 }
@@ -325,7 +435,6 @@
 
 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 {
-	int status = 0;
 	struct ocrdma_pd *pd = uctx->cntxt_pd;
 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
@@ -334,8 +443,8 @@
 		       __func__, dev->id, pd->id);
 	}
 	uctx->cntxt_pd = NULL;
-	status = _ocrdma_dealloc_pd(dev, pd);
-	return status;
+	(void)_ocrdma_dealloc_pd(dev, pd);
+	return 0;
 }
 
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -569,7 +678,7 @@
 	if (is_uctx_pd) {
 		ocrdma_release_ucontext_pd(uctx);
 	} else {
-		status = ocrdma_mbx_dealloc_pd(dev, pd);
+		status = _ocrdma_dealloc_pd(dev, pd);
 		kfree(pd);
 	}
 exit:
@@ -837,9 +946,8 @@
 {
 	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
-	int status;
 
-	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
+	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
 
 	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
 
@@ -850,11 +958,10 @@
 
 	/* Don't stop cleanup, in case FW is unresponsive */
 	if (dev->mqe_ctx.fw_error_state) {
-		status = 0;
 		pr_err("%s(%d) fw not responding.\n",
 		       __func__, dev->id);
 	}
-	return status;
+	return 0;
 }
 
 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
@@ -986,7 +1093,6 @@
 
 int ocrdma_destroy_cq(struct ib_cq *ibcq)
 {
-	int status;
 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 	struct ocrdma_eq *eq = NULL;
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
@@ -1003,7 +1109,7 @@
 	synchronize_irq(irq);
 	ocrdma_flush_cq(cq);
 
-	status = ocrdma_mbx_destroy_cq(dev, cq);
+	(void)ocrdma_mbx_destroy_cq(dev, cq);
 	if (cq->ucontext) {
 		pdid = cq->ucontext->cntxt_pd->id;
 		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1014,7 +1120,7 @@
 	}
 
 	kfree(cq);
-	return status;
+	return 0;
 }
 
 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -1113,8 +1219,8 @@
 	int status = 0;
 	u64 usr_db;
 	struct ocrdma_create_qp_uresp uresp;
-	struct ocrdma_dev *dev = qp->dev;
 	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
 	memset(&uresp, 0, sizeof(uresp));
 	usr_db = dev->nic_info.unmapped_db +
@@ -1253,7 +1359,6 @@
 		status = -ENOMEM;
 		goto gen_err;
 	}
-	qp->dev = dev;
 	ocrdma_set_qp_init_params(qp, pd, attrs);
 	if (udata == NULL)
 		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@@ -1312,7 +1417,7 @@
 	enum ib_qp_state old_qps;
 
 	qp = get_ocrdma_qp(ibqp);
-	dev = qp->dev;
+	dev = get_ocrdma_dev(ibqp->device);
 	if (attr_mask & IB_QP_STATE)
 		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
 	/* if new and previous states are same hw doesn't need to
@@ -1335,7 +1440,7 @@
 	enum ib_qp_state old_qps, new_qps;
 
 	qp = get_ocrdma_qp(ibqp);
-	dev = qp->dev;
+	dev = get_ocrdma_dev(ibqp->device);
 
 	/* synchronize with multiple contexts trying to change, retrieve qps */
 	mutex_lock(&dev->dev_lock);
@@ -1402,7 +1507,7 @@
 	u32 qp_state;
 	struct ocrdma_qp_params params;
 	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
 
 	memset(&params, 0, sizeof(params));
 	mutex_lock(&dev->dev_lock);
@@ -1412,8 +1517,6 @@
 		goto mbx_err;
 	if (qp->qp_type == IB_QPT_UD)
 		qp_attr->qkey = params.qkey;
-	qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
-	qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
 	qp_attr->path_mtu =
 		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
 				OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
@@ -1468,6 +1571,8 @@
 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
 	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
 		    OCRDMA_QP_PARAMS_STATE_SHIFT;
+	qp_attr->qp_state = get_ibqp_state(qp_state);
+	qp_attr->cur_qp_state = qp_attr->qp_state;
 	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
 	qp_attr->max_dest_rd_atomic =
 	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
@@ -1475,19 +1580,18 @@
 	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
 	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
 				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
+	/* Sync driver QP state with FW */
+	ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
 mbx_err:
 	return status;
 }
 
-static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
+static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
 {
-	int i = idx / 32;
-	unsigned int mask = (1 << (idx % 32));
+	unsigned int i = idx / 32;
+	u32 mask = (1U << (idx % 32));
 
-	if (srq->idx_bit_fields[i] & mask)
-		srq->idx_bit_fields[i] &= ~mask;
-	else
-		srq->idx_bit_fields[i] |= mask;
+	srq->idx_bit_fields[i] ^= mask;
 }
 
 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
@@ -1596,7 +1700,7 @@
 {
 	int found = false;
 	unsigned long flags;
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 	/* sync with any active CQ poll */
 
 	spin_lock_irqsave(&dev->flush_q_lock, flags);
@@ -1613,7 +1717,6 @@
 
 int ocrdma_destroy_qp(struct ib_qp *ibqp)
 {
-	int status;
 	struct ocrdma_pd *pd;
 	struct ocrdma_qp *qp;
 	struct ocrdma_dev *dev;
@@ -1622,7 +1725,7 @@
 	unsigned long flags;
 
 	qp = get_ocrdma_qp(ibqp);
-	dev = qp->dev;
+	dev = get_ocrdma_dev(ibqp->device);
 
 	attrs.qp_state = IB_QPS_ERR;
 	pd = qp->pd;
@@ -1635,7 +1738,7 @@
 	 * discarded until the old CQEs are discarded.
 	 */
 	mutex_lock(&dev->dev_lock);
-	status = ocrdma_mbx_destroy_qp(dev, qp);
+	(void) ocrdma_mbx_destroy_qp(dev, qp);
 
 	/*
 	 * acquire CQ lock while destroy is in progress, in order to
@@ -1670,7 +1773,7 @@
 	kfree(qp->wqe_wr_id_tbl);
 	kfree(qp->rqe_wr_id_tbl);
 	kfree(qp);
-	return status;
+	return 0;
 }
 
 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
@@ -1831,6 +1934,8 @@
 	else
 		ud_hdr->qkey = wr->wr.ud.remote_qkey;
 	ud_hdr->rsvd_ahid = ah->id;
+	if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
+		hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
 }
 
 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
@@ -2007,11 +2112,12 @@
 	u64 fbo;
 	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
 	struct ocrdma_mr *mr;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
 
 	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
 
-	if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
+	if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
 		return -EINVAL;
 
 	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2039,7 +2145,7 @@
 	fast_reg->size_sge =
 		get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
 	mr = (struct ocrdma_mr *) (unsigned long)
-		qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
+		dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
 	build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
 	return 0;
 }
@@ -2112,8 +2218,6 @@
 			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
 			status = ocrdma_build_write(qp, hdr, wr);
 			break;
-		case IB_WR_RDMA_READ_WITH_INV:
-			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
 		case IB_WR_RDMA_READ:
 			ocrdma_build_read(qp, hdr, wr);
 			break;
@@ -2484,8 +2588,11 @@
 				 bool *polled, bool *stop)
 {
 	bool expand;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
 		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+	if (status < OCRDMA_MAX_CQE_ERR)
+		atomic_inc(&dev->cqe_err_stats[status]);
 
 	/* when hw sq is empty, but rq is not empty, so we continue
 	 * to keep the cqe in order to get the cq event again.
@@ -2604,6 +2711,10 @@
 				int status)
 {
 	bool expand;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+
+	if (status < OCRDMA_MAX_CQE_ERR)
+		atomic_inc(&dev->cqe_err_stats[status]);
 
 	/* when hw_rq is empty, but wq is not empty, so continue
 	 * to keep the cqe to get the cq event again.
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index c00ae09..ffd48bf 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1082,12 +1082,6 @@
 	/* control high-level access to EEPROM */
 	struct mutex eep_lock;
 	uint64_t traffic_wds;
-	/* active time is kept in seconds, but logged in hours */
-	atomic_t active_time;
-	/* Below are nominal shadow of EEPROM, new since last EEPROM update */
-	uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
-	uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
-	uint16_t eep_hrs;
 	/*
 	 * masks for which bits of errs, hwerrs that cause
 	 * each of the counters to increment.
@@ -1309,8 +1303,7 @@
 int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
 		    const void *buffer, int len);
 void qib_get_eeprom_info(struct qib_devdata *);
-int qib_update_eeprom_log(struct qib_devdata *dd);
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+#define qib_inc_eeprom_err(dd, eidx, incr)
 void qib_dump_lookup_output_queue(struct qib_devdata *);
 void qib_force_pio_avail_update(struct qib_devdata *);
 void qib_clear_symerror_on_linkup(unsigned long opaque);
@@ -1467,11 +1460,14 @@
  * Flush write combining store buffers (if present) and perform a write
  * barrier.
  */
+static inline void qib_flush_wc(void)
+{
 #if defined(CONFIG_X86_64)
-#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+	asm volatile("sfence" : : : "memory");
 #else
-#define qib_flush_wc() wmb() /* no reorder around wc flush */
+	wmb(); /* no reorder around wc flush */
 #endif
+}
 
 /* global module parameter variables */
 extern unsigned qib_ibmtu;
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index 5670ace..4fb78ab 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -257,7 +257,7 @@
 
 	/* shared memory page for send buffer disarm status */
 	__u64 spi_sendbuf_status;
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /*
  * This version number is given to the driver by the user code during
@@ -361,7 +361,7 @@
 	 */
 	__u64 spu_base_info;
 
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /* User commands. */
 
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 6abd3ed..5e75b43 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -255,7 +255,6 @@
 	DEBUGFS_FILE_CREATE(opcode_stats);
 	DEBUGFS_FILE_CREATE(ctx_stats);
 	DEBUGFS_FILE_CREATE(qp_stats);
-	return;
 }
 
 void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 5dfda4c..8c34b23 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -85,7 +85,7 @@
 		client_pool = dc->next;
 	else
 		/* None in pool, alloc and init */
-		dc = kmalloc(sizeof *dc, GFP_KERNEL);
+		dc = kmalloc(sizeof(*dc), GFP_KERNEL);
 
 	if (dc) {
 		dc->next = NULL;
@@ -257,6 +257,7 @@
 	if (dd->userbase) {
 		/* If user regs mapped, they are after send, so set limit. */
 		u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+
 		if (!dd->piovl15base)
 			snd_lim = dd->uregbase;
 		krb32 = (u32 __iomem *)dd->userbase;
@@ -280,6 +281,7 @@
 	snd_bottom = dd->pio2k_bufbase;
 	if (snd_lim == 0) {
 		u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+
 		snd_lim = snd_bottom + tot2k;
 	}
 	/* If 4k buffers exist, account for them by bumping
@@ -398,6 +400,7 @@
 	/* not very efficient, but it works for now */
 	while (reg_addr < reg_end) {
 		u64 data;
+
 		if (copy_from_user(&data, uaddr, sizeof(data))) {
 			ret = -EFAULT;
 			goto bail;
@@ -698,7 +701,7 @@
 
 	if (!dd || !op)
 		return -EINVAL;
-	olp = vmalloc(sizeof *olp);
+	olp = vmalloc(sizeof(*olp));
 	if (!olp) {
 		pr_err("vmalloc for observer failed\n");
 		return -ENOMEM;
@@ -796,6 +799,7 @@
 		op = diag_get_observer(dd, *off);
 		if (op) {
 			u32 offset = *off;
+
 			ret = op->hook(dd, op, offset, &data64, 0, use_32);
 		}
 		/*
@@ -873,6 +877,7 @@
 		if (count == 4 || count == 8) {
 			u64 data64;
 			u32 offset = *off;
+
 			ret = copy_from_user(&data64, data, count);
 			if (ret) {
 				ret = -EFAULT;
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 5bee08f..f58fdc3 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -86,7 +86,7 @@
 {
 	static char iname[16];
 
-	snprintf(iname, sizeof iname, "infinipath%u", unit);
+	snprintf(iname, sizeof(iname), "infinipath%u", unit);
 	return iname;
 }
 
@@ -349,6 +349,7 @@
 		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
 		if (qp_num != QIB_MULTICAST_QPN) {
 			int ruc_res;
+
 			qp = qib_lookup_qpn(ibp, qp_num);
 			if (!qp)
 				goto drop;
@@ -461,6 +462,7 @@
 	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
 	if (dd->flags & QIB_NODMA_RTAIL) {
 		u32 seq = qib_hdrget_seq(rhf_addr);
+
 		if (seq != rcd->seq_cnt)
 			goto bail;
 		hdrqtail = 0;
@@ -651,6 +653,7 @@
 int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
 {
 	struct qib_devdata *dd = ppd->dd;
+
 	ppd->lid = lid;
 	ppd->lmc = lmc;
 
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 4d5d71a..311ee6c 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -153,6 +153,7 @@
 
 	if (t && dd0->nguid > 1 && t <= dd0->nguid) {
 		u8 oguid;
+
 		dd->base_guid = dd0->base_guid;
 		bguid = (u8 *) &dd->base_guid;
 
@@ -251,206 +252,25 @@
 		 * This board has a Serial-prefix, which is stored
 		 * elsewhere for backward-compatibility.
 		 */
-		memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
-		snp[sizeof ifp->if_sprefix] = '\0';
+		memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
+		snp[sizeof(ifp->if_sprefix)] = '\0';
 		len = strlen(snp);
 		snp += len;
-		len = (sizeof dd->serial) - len;
-		if (len > sizeof ifp->if_serial)
-			len = sizeof ifp->if_serial;
+		len = sizeof(dd->serial) - len;
+		if (len > sizeof(ifp->if_serial))
+			len = sizeof(ifp->if_serial);
 		memcpy(snp, ifp->if_serial, len);
-	} else
-		memcpy(dd->serial, ifp->if_serial,
-		       sizeof ifp->if_serial);
+	} else {
+		memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
+	}
 	if (!strstr(ifp->if_comment, "Tested successfully"))
 		qib_dev_err(dd,
 			"Board SN %s did not pass functional test: %s\n",
 			dd->serial, ifp->if_comment);
 
-	memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
-	/*
-	 * Power-on (actually "active") hours are kept as little-endian value
-	 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
-	 * atomic_t while running.
-	 */
-	atomic_set(&dd->active_time, 0);
-	dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
 done:
 	vfree(buf);
 
 bail:;
 }
 
-/**
- * qib_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the qlogic_ib device
- *
- * Although the time is kept as seconds in the qib_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct qib_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-int qib_update_eeprom_log(struct qib_devdata *dd)
-{
-	void *buf;
-	struct qib_flash *ifp;
-	int len, hi_water;
-	uint32_t new_time, new_hrs;
-	u8 csum;
-	int ret, idx;
-	unsigned long flags;
-
-	/* first, check if we actually need to do anything. */
-	ret = 0;
-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-		if (dd->eep_st_new_errs[idx]) {
-			ret = 1;
-			break;
-		}
-	}
-	new_time = atomic_read(&dd->active_time);
-
-	if (ret == 0 && new_time < 3600)
-		goto bail;
-
-	/*
-	 * The quick-check above determined that there is something worthy
-	 * of logging, so get current contents and do a more detailed idea.
-	 * read full flash, not just currently used part, since it may have
-	 * been written with a newer definition
-	 */
-	len = sizeof(struct qib_flash);
-	buf = vmalloc(len);
-	ret = 1;
-	if (!buf) {
-		qib_dev_err(dd,
-			"Couldn't allocate memory to read %u bytes from eeprom for logging\n",
-			len);
-		goto bail;
-	}
-
-	/* Grab semaphore and read current EEPROM. If we get an
-	 * error, let go, but if not, keep it until we finish write.
-	 */
-	ret = mutex_lock_interruptible(&dd->eep_lock);
-	if (ret) {
-		qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
-		goto free_bail;
-	}
-	ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
-	if (ret) {
-		mutex_unlock(&dd->eep_lock);
-		qib_dev_err(dd, "Unable read EEPROM for logging\n");
-		goto free_bail;
-	}
-	ifp = (struct qib_flash *)buf;
-
-	csum = flash_csum(ifp, 0);
-	if (csum != ifp->if_csum) {
-		mutex_unlock(&dd->eep_lock);
-		qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
-			    csum, ifp->if_csum);
-		ret = 1;
-		goto free_bail;
-	}
-	hi_water = 0;
-	spin_lock_irqsave(&dd->eep_st_lock, flags);
-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-		int new_val = dd->eep_st_new_errs[idx];
-		if (new_val) {
-			/*
-			 * If we have seen any errors, add to EEPROM values
-			 * We need to saturate at 0xFF (255) and we also
-			 * would need to adjust the checksum if we were
-			 * trying to minimize EEPROM traffic
-			 * Note that we add to actual current count in EEPROM,
-			 * in case it was altered while we were running.
-			 */
-			new_val += ifp->if_errcntp[idx];
-			if (new_val > 0xFF)
-				new_val = 0xFF;
-			if (ifp->if_errcntp[idx] != new_val) {
-				ifp->if_errcntp[idx] = new_val;
-				hi_water = offsetof(struct qib_flash,
-						    if_errcntp) + idx;
-			}
-			/*
-			 * update our shadow (used to minimize EEPROM
-			 * traffic), to match what we are about to write.
-			 */
-			dd->eep_st_errs[idx] = new_val;
-			dd->eep_st_new_errs[idx] = 0;
-		}
-	}
-	/*
-	 * Now update active-time. We would like to round to the nearest hour
-	 * but unless atomic_t are sure to be proper signed ints we cannot,
-	 * because we need to account for what we "transfer" to EEPROM and
-	 * if we log an hour at 31 minutes, then we would need to set
-	 * active_time to -29 to accurately count the _next_ hour.
-	 */
-	if (new_time >= 3600) {
-		new_hrs = new_time / 3600;
-		atomic_sub((new_hrs * 3600), &dd->active_time);
-		new_hrs += dd->eep_hrs;
-		if (new_hrs > 0xFFFF)
-			new_hrs = 0xFFFF;
-		dd->eep_hrs = new_hrs;
-		if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
-			ifp->if_powerhour[0] = new_hrs & 0xFF;
-			hi_water = offsetof(struct qib_flash, if_powerhour);
-		}
-		if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
-			ifp->if_powerhour[1] = new_hrs >> 8;
-			hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
-		}
-	}
-	/*
-	 * There is a tiny possibility that we could somehow fail to write
-	 * the EEPROM after updating our shadows, but problems from holding
-	 * the spinlock too long are a much bigger issue.
-	 */
-	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-	if (hi_water) {
-		/* we made some change to the data, uopdate cksum and write */
-		csum = flash_csum(ifp, 1);
-		ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
-	}
-	mutex_unlock(&dd->eep_lock);
-	if (ret)
-		qib_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
-	vfree(buf);
-bail:
-	return ret;
-}
-
-/**
- * qib_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the qlogic_ib device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
-{
-	uint new_val;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->eep_st_lock, flags);
-	new_val = dd->eep_st_new_errs[eidx] + incr;
-	if (new_val > 255)
-		new_val = 255;
-	dd->eep_st_new_errs[eidx] = new_val;
-	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index b15e34e..41937c6 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -351,9 +351,10 @@
 		 * unless perhaps the user has mpin'ed the pages
 		 * themselves.
 		 */
-		qib_devinfo(dd->pcidev,
-			 "Failed to lock addr %p, %u pages: "
-			 "errno %d\n", (void *) vaddr, cnt, -ret);
+		qib_devinfo(
+			dd->pcidev,
+			"Failed to lock addr %p, %u pages: errno %d\n",
+			(void *) vaddr, cnt, -ret);
 		goto done;
 	}
 	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
@@ -437,7 +438,7 @@
 			goto cleanup;
 		}
 		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
-				 tidmap, sizeof tidmap)) {
+				 tidmap, sizeof(tidmap))) {
 			ret = -EFAULT;
 			goto cleanup;
 		}
@@ -484,7 +485,7 @@
 	}
 
 	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
-			   sizeof tidmap)) {
+			   sizeof(tidmap))) {
 		ret = -EFAULT;
 		goto done;
 	}
@@ -951,8 +952,8 @@
 		/* rcvegrbufs are read-only on the slave */
 		if (vma->vm_flags & VM_WRITE) {
 			qib_devinfo(dd->pcidev,
-				 "Can't map eager buffers as "
-				 "writable (flags=%lx)\n", vma->vm_flags);
+				 "Can't map eager buffers as writable (flags=%lx)\n",
+				 vma->vm_flags);
 			ret = -EPERM;
 			goto bail;
 		}
@@ -1185,6 +1186,7 @@
 	 */
 	if (weight >= qib_cpulist_count) {
 		int cpu;
+
 		cpu = find_first_zero_bit(qib_cpulist,
 					  qib_cpulist_count);
 		if (cpu == qib_cpulist_count)
@@ -1247,10 +1249,7 @@
 	if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
 		uinfo->spu_userversion & 0xffff)) {
 		qib_devinfo(dd->pcidev,
-			 "Mismatched user version (%d.%d) and driver "
-			 "version (%d.%d) while context sharing. Ensure "
-			 "that driver and library are from the same "
-			 "release.\n",
+			 "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
 			 (int) (uinfo->spu_userversion >> 16),
 			 (int) (uinfo->spu_userversion & 0xffff),
 			 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
@@ -1391,6 +1390,7 @@
 	}
 	if (!ppd) {
 		u32 pidx = ctxt % dd->num_pports;
+
 		if (usable(dd->pport + pidx))
 			ppd = dd->pport + pidx;
 		else {
@@ -1438,10 +1438,12 @@
 
 	if (alg == QIB_PORT_ALG_ACROSS) {
 		unsigned inuse = ~0U;
+
 		/* find device (with ACTIVE ports) with fewest ctxts in use */
 		for (ndev = 0; ndev < devmax; ndev++) {
 			struct qib_devdata *dd = qib_lookup(ndev);
 			unsigned cused = 0, cfree = 0, pusable = 0;
+
 			if (!dd)
 				continue;
 			if (port && port <= dd->num_pports &&
@@ -1471,6 +1473,7 @@
 	} else {
 		for (ndev = 0; ndev < devmax; ndev++) {
 			struct qib_devdata *dd = qib_lookup(ndev);
+
 			if (dd) {
 				ret = choose_port_ctxt(fp, dd, port, uinfo);
 				if (!ret)
@@ -1556,6 +1559,7 @@
 	}
 	for (ndev = 0; ndev < devmax; ndev++) {
 		struct qib_devdata *dd = qib_lookup(ndev);
+
 		if (dd) {
 			if (pcibus_to_node(dd->pcidev->bus) < 0) {
 				ret = -EINVAL;
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 8185458..650897a 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -106,7 +106,7 @@
 {
 	qib_stats.sps_ints = qib_sps_ints();
 	return simple_read_from_buffer(buf, count, ppos, &qib_stats,
-				       sizeof qib_stats);
+				       sizeof(qib_stats));
 }
 
 /*
@@ -133,7 +133,7 @@
 				 size_t count, loff_t *ppos)
 {
 	return simple_read_from_buffer(buf, count, ppos, qib_statnames,
-		sizeof qib_statnames - 1); /* no null */
+		sizeof(qib_statnames) - 1); /* no null */
 }
 
 static const struct file_operations driver_ops[] = {
@@ -379,7 +379,7 @@
 	int ret, i;
 
 	/* create the per-unit directory */
-	snprintf(unit, sizeof unit, "%u", dd->unit);
+	snprintf(unit, sizeof(unit), "%u", dd->unit);
 	ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
 			  &simple_dir_operations, dd);
 	if (ret) {
@@ -455,7 +455,7 @@
 	}
 
 	spin_lock(&tmp->d_lock);
-	if (!(d_unhashed(tmp) && tmp->d_inode)) {
+	if (!d_unhashed(tmp) && tmp->d_inode) {
 		__d_drop(tmp);
 		spin_unlock(&tmp->d_lock);
 		simple_unlink(parent->d_inode, tmp);
@@ -482,7 +482,7 @@
 
 	root = dget(sb->s_root);
 	mutex_lock(&root->d_inode->i_mutex);
-	snprintf(unit, sizeof unit, "%u", dd->unit);
+	snprintf(unit, sizeof(unit), "%u", dd->unit);
 	dir = lookup_one_len(unit, root, strlen(unit));
 
 	if (IS_ERR(dir)) {
@@ -560,6 +560,7 @@
 			const char *dev_name, void *data)
 {
 	struct dentry *ret;
+
 	ret = mount_single(fs_type, flags, data, qibfs_fill_super);
 	if (!IS_ERR(ret))
 		qib_super = ret->d_sb;
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index d68266a..0d2ba59 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -333,6 +333,7 @@
 				  enum qib_ureg regno, u64 value, int ctxt)
 {
 	u64 __iomem *ubase;
+
 	if (dd->userbase)
 		ubase = (u64 __iomem *)
 			((char __iomem *) dd->userbase +
@@ -834,14 +835,14 @@
 		bits = (u32) ((hwerrs >>
 			       QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
 			      QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+		snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
 			 "[PCIe Mem Parity Errs %x] ", bits);
 		strlcat(msg, bitsmsg, msgl);
 	}
 
 	if (hwerrs & _QIB_PLL_FAIL) {
 		isfatal = 1;
-		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+		snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
 			 "[PLL failed (%llx), InfiniPath hardware unusable]",
 			 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
 		strlcat(msg, bitsmsg, msgl);
@@ -1014,7 +1015,7 @@
 
 	/* do these first, they are most important */
 	if (errs & ERR_MASK(HardwareErr))
-		qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+		qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
 	else
 		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
 			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1062,7 +1063,7 @@
 	 */
 	mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
 		ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
-	qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+	qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 
 	if (errs & E_SUM_PKTERRS)
 		qib_stats.sps_rcverrs++;
@@ -1670,6 +1671,7 @@
 		}
 		if (crcs) {
 			u32 cntr = dd->cspec->lli_counter;
+
 			cntr += crcs;
 			if (cntr) {
 				if (cntr > dd->cspec->lli_thresh) {
@@ -1722,6 +1724,7 @@
 			"irq is 0, BIOS error?  Interrupts won't work\n");
 	else {
 		int ret;
+
 		ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
 				  QIB_DRV_NAME, dd);
 		if (ret)
@@ -2681,8 +2684,6 @@
 	spin_lock_irqsave(&dd->eep_st_lock, flags);
 	traffic_wds -= dd->traffic_wds;
 	dd->traffic_wds += traffic_wds;
-	if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-		atomic_add(5, &dd->active_time); /* S/B #define */
 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 
 	qib_chk_6120_errormask(dd);
@@ -2929,6 +2930,7 @@
 static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
 {
 	int ret = 0;
+
 	if (!strncmp(what, "ibc", 3)) {
 		ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
@@ -3170,6 +3172,7 @@
 static void set_6120_baseaddrs(struct qib_devdata *dd)
 {
 	u32 cregbase;
+
 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
 	dd->cspec->cregbase = (u64 __iomem *)
 		((char __iomem *) dd->kregbase + cregbase);
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 7dec89f..22affda 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -902,7 +902,8 @@
 	errs &= QLOGIC_IB_E_SDMAERRS;
 
 	msg = dd->cspec->sdmamsgbuf;
-	qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
+	qib_decode_7220_sdma_errs(ppd, errs, msg,
+		sizeof(dd->cspec->sdmamsgbuf));
 	spin_lock_irqsave(&ppd->sdma_lock, flags);
 
 	if (errs & ERR_MASK(SendBufMisuseErr)) {
@@ -1043,6 +1044,7 @@
 static void reenable_7220_chase(unsigned long opaque)
 {
 	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
 	ppd->cpspec->chase_timer.expires = 0;
 	qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
 		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
@@ -1101,7 +1103,7 @@
 
 	/* do these first, they are most important */
 	if (errs & ERR_MASK(HardwareErr))
-		qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+		qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
 	else
 		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
 			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1155,7 +1157,7 @@
 		ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
 		ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
 
-	qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+	qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 
 	if (errs & E_SUM_PKTERRS)
 		qib_stats.sps_rcverrs++;
@@ -1380,7 +1382,7 @@
 		bits = (u32) ((hwerrs >>
 			       QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
 			      QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+		snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
 			 "[PCIe Mem Parity Errs %x] ", bits);
 		strlcat(msg, bitsmsg, msgl);
 	}
@@ -1390,7 +1392,7 @@
 
 	if (hwerrs & _QIB_PLL_FAIL) {
 		isfatal = 1;
-		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+		snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
 			 "[PLL failed (%llx), InfiniPath hardware unusable]",
 			 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
 		strlcat(msg, bitsmsg, msgl);
@@ -3297,8 +3299,6 @@
 	spin_lock_irqsave(&dd->eep_st_lock, flags);
 	traffic_wds -= dd->traffic_wds;
 	dd->traffic_wds += traffic_wds;
-	if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-		atomic_add(5, &dd->active_time); /* S/B #define */
 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 done:
 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index a7eb325..ef97b71 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -117,7 +117,7 @@
 
 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
-MODULE_PARM_DESC(long_attenuation, \
+MODULE_PARM_DESC(long_attenuation,
 		 "attenuation cutoff (dB) for long copper cable setup");
 
 static ushort qib_singleport;
@@ -153,11 +153,12 @@
 static int  setup_txselect(const char *, struct kernel_param *);
 module_param_call(txselect, setup_txselect, param_get_string,
 		  &kp_txselect, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(txselect, \
+MODULE_PARM_DESC(txselect,
 		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
 
 #define BOARD_QME7342 5
 #define BOARD_QMH7342 6
+#define BOARD_QMH7360 9
 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
 		    BOARD_QMH7342)
 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
@@ -817,6 +818,7 @@
 				  enum qib_ureg regno, u64 value, int ctxt)
 {
 	u64 __iomem *ubase;
+
 	if (dd->userbase)
 		ubase = (u64 __iomem *)
 			((char __iomem *) dd->userbase +
@@ -1677,7 +1679,7 @@
 	/* do these first, they are most important */
 	if (errs & QIB_E_HARDWARE) {
 		*msg = '\0';
-		qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+		qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
 	} else
 		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
 			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1702,7 +1704,7 @@
 	mask = QIB_E_HARDWARE;
 	*msg = '\0';
 
-	err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+	err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
 		   qib_7322error_msgs);
 
 	/*
@@ -1889,10 +1891,10 @@
 	*msg = '\0';
 
 	if (errs & ~QIB_E_P_BITSEXTANT) {
-		err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
 			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
 		if (!*msg)
-			snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
+			snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
 				 "no others");
 		qib_dev_porterr(dd, ppd->port,
 			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
@@ -1906,7 +1908,7 @@
 		/* determine cause, then write to clear */
 		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
 		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
-		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
+		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
 			   hdrchk_msgs);
 		*msg = '\0';
 		/* senderrbuf cleared in SPKTERRS below */
@@ -1922,7 +1924,7 @@
 			 * isn't valid.  We don't want to confuse people, so
 			 * we just don't print them, except at debug
 			 */
-			err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+			err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
 				   (errs & QIB_E_P_LINK_PKTERRS),
 				   qib_7322p_error_msgs);
 			*msg = '\0';
@@ -1938,7 +1940,7 @@
 		 * valid.  We don't want to confuse people, so we just
 		 * don't print them, except at debug
 		 */
-		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
+		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
 			   qib_7322p_error_msgs);
 		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
 		*msg = '\0';
@@ -2031,6 +2033,7 @@
 		if (dd->cspec->num_msix_entries) {
 			/* and same for MSIx */
 			u64 val = qib_read_kreg64(dd, kr_intgranted);
+
 			if (val)
 				qib_write_kreg(dd, kr_intgranted, val);
 		}
@@ -2176,6 +2179,7 @@
 		int err;
 		unsigned long flags;
 		struct qib_pportdata *ppd = dd->pport;
+
 		for (; pidx < dd->num_pports; ++pidx, ppd++) {
 			err = 0;
 			if (pidx == 0 && (hwerrs &
@@ -2801,9 +2805,11 @@
 
 	if (n->rcv) {
 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
 		qib_update_rhdrq_dca(rcd, cpu);
 	} else {
 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
 		qib_update_sdma_dca(ppd, cpu);
 	}
 }
@@ -2816,9 +2822,11 @@
 
 	if (n->rcv) {
 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
 		dd = rcd->dd;
 	} else {
 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
 		dd = ppd->dd;
 	}
 	qib_devinfo(dd->pcidev,
@@ -2994,6 +3002,7 @@
 		struct qib_pportdata *ppd;
 		struct qib_qsfp_data *qd;
 		u32 mask;
+
 		if (!dd->pport[pidx].link_speed_supported)
 			continue;
 		mask = QSFP_GPIO_MOD_PRS_N;
@@ -3001,6 +3010,7 @@
 		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
 		if (gpiostatus & dd->cspec->gpio_mask & mask) {
 			u64 pins;
+
 			qd = &ppd->cpspec->qsfp_data;
 			gpiostatus &= ~mask;
 			pins = qib_read_kreg64(dd, kr_extstatus);
@@ -3442,7 +3452,7 @@
 	}
 
 	/* Try to get MSIx interrupts */
-	memset(redirect, 0, sizeof redirect);
+	memset(redirect, 0, sizeof(redirect));
 	mask = ~0ULL;
 	msixnum = 0;
 	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
@@ -3617,6 +3627,10 @@
 		n = "InfiniPath_QME7362";
 		dd->flags |= QIB_HAS_QSFP;
 		break;
+	case BOARD_QMH7360:
+		n = "Intel IB QDR 1P FLR-QSFP Adptr";
+		dd->flags |= QIB_HAS_QSFP;
+		break;
 	case 15:
 		n = "InfiniPath_QLE7342_TEST";
 		dd->flags |= QIB_HAS_QSFP;
@@ -3694,6 +3708,7 @@
 	 */
 	for (i = 0; i < msix_entries; i++) {
 		u64 vecaddr, vecdata;
+
 		vecaddr = qib_read_kreg64(dd, 2 * i +
 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
 		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
@@ -5178,8 +5193,6 @@
 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
 		traffic_wds -= ppd->dd->traffic_wds;
 		ppd->dd->traffic_wds += traffic_wds;
-		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
 						QIB_IB_QDR) &&
@@ -5357,6 +5370,7 @@
 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
 {
 	u64 newctrlb;
+
 	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
 				    IBA7322_IBC_IBTA_1_2_MASK |
 				    IBA7322_IBC_MAX_SPEED_MASK);
@@ -5843,6 +5857,7 @@
 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
 {
 	u32 cregbase;
+
 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
 
 	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
@@ -6183,6 +6198,7 @@
 	struct qib_devdata *dd;
 	unsigned long val;
 	char *n;
+
 	if (strlen(str) >= MAX_ATTEN_LEN) {
 		pr_info("txselect_values string too long\n");
 		return -ENOSPC;
@@ -6393,6 +6409,7 @@
 	val = TIDFLOW_ERRBITS; /* these are W1C */
 	for (i = 0; i < dd->cfgctxts; i++) {
 		int flow;
+
 		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
 	}
@@ -6503,6 +6520,7 @@
 
 	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
 		struct qib_chippport_specific *cp = ppd->cpspec;
+
 		ppd->link_speed_supported = features & PORT_SPD_CAP;
 		features >>=  PORT_SPD_CAP_SHIFT;
 		if (!ppd->link_speed_supported) {
@@ -6581,8 +6599,7 @@
 				ppd->vls_supported = IB_VL_VL0_7;
 			else {
 				qib_devinfo(dd->pcidev,
-					    "Invalid num_vls %u for MTU %d "
-					    ", using 4 VLs\n",
+					    "Invalid num_vls %u for MTU %d , using 4 VLs\n",
 					    qib_num_cfg_vls, mtu);
 				ppd->vls_supported = IB_VL_VL0_3;
 				qib_num_cfg_vls = 4;
@@ -7890,6 +7907,7 @@
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
 	int ret = 0;
+
 	if (ppd->dd->cspec->r1)
 		ret = serdes_7322_init_old(ppd);
 	else
@@ -8305,8 +8323,8 @@
 
 static int qib_r_grab(struct qib_devdata *dd)
 {
-	u64 val;
-	val = SJA_EN;
+	u64 val = SJA_EN;
+
 	qib_write_kreg(dd, kr_r_access, val);
 	qib_read_kreg32(dd, kr_scratch);
 	return 0;
@@ -8319,6 +8337,7 @@
 {
 	u64 val;
 	int timeout;
+
 	for (timeout = 0; timeout < 100 ; ++timeout) {
 		val = qib_read_kreg32(dd, kr_r_access);
 		if (val & R_RDY)
@@ -8346,6 +8365,7 @@
 		}
 		if (inp) {
 			int tdi = inp[pos >> 3] >> (pos & 7);
+
 			val |= ((tdi & 1) << R_TDI_LSB);
 		}
 		qib_write_kreg(dd, kr_r_access, val);
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 729da39c..2ee3695 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -140,7 +140,7 @@
 	 * Allocate full ctxtcnt array, rather than just cfgctxts, because
 	 * cleanup iterates across all possible ctxts.
 	 */
-	dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
+	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
 	if (!dd->rcd) {
 		qib_dev_err(dd,
 			"Unable to allocate ctxtdata array, failing\n");
@@ -234,6 +234,7 @@
 			u8 hw_pidx, u8 port)
 {
 	int size;
+
 	ppd->dd = dd;
 	ppd->hw_pidx = hw_pidx;
 	ppd->port = port; /* IB port number, not index */
@@ -613,6 +614,7 @@
 		ppd = dd->pport + pidx;
 		if (!ppd->qib_wq) {
 			char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
+
 			snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
 				dd->unit, pidx);
 			ppd->qib_wq =
@@ -714,6 +716,7 @@
 
 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
 		int mtu;
+
 		if (lastfail)
 			ret = lastfail;
 		ppd = dd->pport + pidx;
@@ -931,7 +934,6 @@
 		qib_free_pportdata(ppd);
 	}
 
-	qib_update_eeprom_log(dd);
 }
 
 /**
@@ -1026,8 +1028,7 @@
 	addr = vmalloc(cnt);
 	if (!addr) {
 		qib_devinfo(dd->pcidev,
-			 "Couldn't get memory for checking PIO perf,"
-			 " skipping\n");
+			 "Couldn't get memory for checking PIO perf, skipping\n");
 		goto done;
 	}
 
@@ -1163,6 +1164,7 @@
 
 	if (!qib_cpulist_count) {
 		u32 count = num_online_cpus();
+
 		qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
 				      sizeof(long), GFP_KERNEL);
 		if (qib_cpulist)
@@ -1179,7 +1181,7 @@
 	if (!list_empty(&dd->list))
 		list_del_init(&dd->list);
 	ib_dealloc_device(&dd->verbs_dev.ibdev);
-	return ERR_PTR(ret);;
+	return ERR_PTR(ret);
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index f4918f2..086616d 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -168,7 +168,6 @@
 	ppd->lastibcstat = ibcs;
 	if (ev)
 		signal_ib_event(ppd, ev);
-	return;
 }
 
 void qib_clear_symerror_on_linkup(unsigned long opaque)
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 3b9afcc..ad843c7 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -122,10 +122,10 @@
 	if (!mr->lkey_published)
 		goto out;
 	if (lkey == 0)
-		rcu_assign_pointer(dev->dma_mr, NULL);
+		RCU_INIT_POINTER(dev->dma_mr, NULL);
 	else {
 		r = lkey >> (32 - ib_qib_lkey_table_size);
-		rcu_assign_pointer(rkt->table[r], NULL);
+		RCU_INIT_POINTER(rkt->table[r], NULL);
 	}
 	qib_put_mr(mr);
 	mr->lkey_published = 0;
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 636be11..395f404 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -152,14 +152,14 @@
 	data.trap_num = trap_num;
 	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
 	data.toggle_count = 0;
-	memset(&data.details, 0, sizeof data.details);
+	memset(&data.details, 0, sizeof(data.details));
 	data.details.ntc_257_258.lid1 = lid1;
 	data.details.ntc_257_258.lid2 = lid2;
 	data.details.ntc_257_258.key = cpu_to_be32(key);
 	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
 	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
 
-	qib_send_trap(ibp, &data, sizeof data);
+	qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -176,7 +176,7 @@
 	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
 	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
 	data.toggle_count = 0;
-	memset(&data.details, 0, sizeof data.details);
+	memset(&data.details, 0, sizeof(data.details));
 	data.details.ntc_256.lid = data.issuer_lid;
 	data.details.ntc_256.method = smp->method;
 	data.details.ntc_256.attr_id = smp->attr_id;
@@ -198,7 +198,7 @@
 		       hop_cnt);
 	}
 
-	qib_send_trap(ibp, &data, sizeof data);
+	qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -214,11 +214,11 @@
 	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
 	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
 	data.toggle_count = 0;
-	memset(&data.details, 0, sizeof data.details);
+	memset(&data.details, 0, sizeof(data.details));
 	data.details.ntc_144.lid = data.issuer_lid;
 	data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
 
-	qib_send_trap(ibp, &data, sizeof data);
+	qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -234,11 +234,11 @@
 	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
 	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
 	data.toggle_count = 0;
-	memset(&data.details, 0, sizeof data.details);
+	memset(&data.details, 0, sizeof(data.details));
 	data.details.ntc_145.lid = data.issuer_lid;
 	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
 
-	qib_send_trap(ibp, &data, sizeof data);
+	qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -254,12 +254,12 @@
 	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
 	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
 	data.toggle_count = 0;
-	memset(&data.details, 0, sizeof data.details);
+	memset(&data.details, 0, sizeof(data.details));
 	data.details.ntc_144.lid = data.issuer_lid;
 	data.details.ntc_144.local_changes = 1;
 	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
 
-	qib_send_trap(ibp, &data, sizeof data);
+	qib_send_trap(ibp, &data, sizeof(data));
 }
 
 static int subn_get_nodedescription(struct ib_smp *smp,
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
index 8b73a11..146cf29 100644
--- a/drivers/infiniband/hw/qib/qib_mmap.c
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -134,7 +134,7 @@
 					   void *obj) {
 	struct qib_mmap_info *ip;
 
-	ip = kmalloc(sizeof *ip, GFP_KERNEL);
+	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
 	if (!ip)
 		goto bail;
 
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index a77fb4f..c4473db 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -55,7 +55,7 @@
 
 	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
 	for (; i < m; i++) {
-		mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
+		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
 		if (!mr->map[i])
 			goto bail;
 	}
@@ -104,7 +104,7 @@
 		goto bail;
 	}
 
-	mr = kzalloc(sizeof *mr, GFP_KERNEL);
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
@@ -143,7 +143,7 @@
 
 	/* Allocate struct plus pointers to first level page tables. */
 	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
-	mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
 	if (!mr)
 		goto bail;
 
@@ -347,7 +347,7 @@
 	if (size > PAGE_SIZE)
 		return ERR_PTR(-EINVAL);
 
-	pl = kzalloc(sizeof *pl, GFP_KERNEL);
+	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
 	if (!pl)
 		return ERR_PTR(-ENOMEM);
 
@@ -386,7 +386,7 @@
 
 	/* Allocate struct plus pointers to first level page tables. */
 	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
-	fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
 	if (!fmr)
 		goto bail;
 
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 61a0046..4758a380 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -210,7 +210,7 @@
 	/* We can't pass qib_msix_entry array to qib_msix_setup
 	 * so use a dummy msix_entry array and copy the allocated
 	 * irq back to the qib_msix_entry array. */
-	msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL);
+	msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
 	if (!msix_entry)
 		goto do_intx;
 
@@ -234,8 +234,10 @@
 	kfree(msix_entry);
 
 do_intx:
-	qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, "
-			"falling back to INTx\n", nvec, ret);
+	qib_dev_err(
+		dd,
+		"pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
+		nvec, ret);
 	*msixcnt = 0;
 	qib_enable_intx(dd->pcidev);
 }
@@ -459,6 +461,7 @@
 void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
 {
 	int r;
+
 	r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
 				   dd->pcibar0);
 	if (r)
@@ -696,6 +699,7 @@
 qib_pci_resume(struct pci_dev *pdev)
 {
 	struct qib_devdata *dd = pci_get_drvdata(pdev);
+
 	qib_devinfo(pdev, "QIB resume function called\n");
 	pci_cleanup_aer_uncorrect_error_status(pdev);
 	/*
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6ddc026..4fa88ba 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -255,10 +255,10 @@
 
 	if (rcu_dereference_protected(ibp->qp0,
 			lockdep_is_held(&dev->qpt_lock)) == qp) {
-		rcu_assign_pointer(ibp->qp0, NULL);
+		RCU_INIT_POINTER(ibp->qp0, NULL);
 	} else if (rcu_dereference_protected(ibp->qp1,
 			lockdep_is_held(&dev->qpt_lock)) == qp) {
-		rcu_assign_pointer(ibp->qp1, NULL);
+		RCU_INIT_POINTER(ibp->qp1, NULL);
 	} else {
 		struct qib_qp *q;
 		struct qib_qp __rcu **qpp;
@@ -269,7 +269,7 @@
 				lockdep_is_held(&dev->qpt_lock))) != NULL;
 				qpp = &q->next)
 			if (q == qp) {
-				rcu_assign_pointer(*qpp,
+				RCU_INIT_POINTER(*qpp,
 					rcu_dereference_protected(qp->next,
 					 lockdep_is_held(&dev->qpt_lock)));
 				removed = 1;
@@ -315,7 +315,7 @@
 	for (n = 0; n < dev->qp_table_size; n++) {
 		qp = rcu_dereference_protected(dev->qp_table[n],
 			lockdep_is_held(&dev->qpt_lock));
-		rcu_assign_pointer(dev->qp_table[n], NULL);
+		RCU_INIT_POINTER(dev->qp_table[n], NULL);
 
 		for (; qp; qp = rcu_dereference_protected(qp->next,
 					lockdep_is_held(&dev->qpt_lock)))
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index fa71b1e..5e27f76 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -81,7 +81,7 @@
 	 * Module could take up to 2 Msec to respond to MOD_SEL, and there
 	 * is no way to tell if it is ready, so we must wait.
 	 */
-	msleep(2);
+	msleep(20);
 
 	/* Make sure TWSI bus is in sane state. */
 	ret = qib_twsi_reset(dd);
@@ -99,6 +99,7 @@
 	while (cnt < len) {
 		unsigned in_page;
 		int wlen = len - cnt;
+
 		in_page = addr % QSFP_PAGESIZE;
 		if ((in_page + wlen) > QSFP_PAGESIZE)
 			wlen = QSFP_PAGESIZE - in_page;
@@ -139,7 +140,7 @@
 	else if (pass)
 		qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
 
-	msleep(2);
+	msleep(20);
 
 bail:
 	mutex_unlock(&dd->eep_lock);
@@ -189,7 +190,7 @@
 	 * Module could take up to 2 Msec to respond to MOD_SEL,
 	 * and there is no way to tell if it is ready, so we must wait.
 	 */
-	msleep(2);
+	msleep(20);
 
 	/* Make sure TWSI bus is in sane state. */
 	ret = qib_twsi_reset(dd);
@@ -206,6 +207,7 @@
 	while (cnt < len) {
 		unsigned in_page;
 		int wlen = len - cnt;
+
 		in_page = addr % QSFP_PAGESIZE;
 		if ((in_page + wlen) > QSFP_PAGESIZE)
 			wlen = QSFP_PAGESIZE - in_page;
@@ -234,7 +236,7 @@
 	 * going away, and there is no way to tell if it is ready.
 	 * so we must wait.
 	 */
-	msleep(2);
+	msleep(20);
 
 bail:
 	mutex_unlock(&dd->eep_lock);
@@ -296,6 +298,7 @@
 		 * set the page to zero, Even if it already appears to be zero.
 		 */
 		u8 poke = 0;
+
 		ret = qib_qsfp_write(ppd, 127, &poke, 1);
 		udelay(50);
 		if (ret != 1) {
@@ -480,7 +483,6 @@
 	udelay(20); /* Generous RST dwell */
 
 	dd->f_gpio_mod(dd, mask, mask, mask);
-	return;
 }
 
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
@@ -540,6 +542,7 @@
 
 	while (bidx < QSFP_DEFAULT_HDR_CNT) {
 		int iidx;
+
 		ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
 		if (ret < 0)
 			goto bail;
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 2f25018..4544d6f 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1017,7 +1017,7 @@
 		/* Post a send completion queue entry if requested. */
 		if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
 		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof wc);
+			memset(&wc, 0, sizeof(wc));
 			wc.wr_id = wqe->wr.wr_id;
 			wc.status = IB_WC_SUCCESS;
 			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
@@ -1073,7 +1073,7 @@
 		/* Post a send completion queue entry if requested. */
 		if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
 		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof wc);
+			memset(&wc, 0, sizeof(wc));
 			wc.wr_id = wqe->wr.wr_id;
 			wc.status = IB_WC_SUCCESS;
 			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 4c07a8b..f42bd0f 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -247,8 +247,8 @@
 		struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 
 		return ppd->guid;
-	} else
-		return ibp->guids[index - 1];
+	}
+	return ibp->guids[index - 1];
 }
 
 static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
@@ -420,7 +420,7 @@
 		goto serr;
 	}
 
-	memset(&wc, 0, sizeof wc);
+	memset(&wc, 0, sizeof(wc));
 	send_status = IB_WC_SUCCESS;
 
 	release = 1;
@@ -792,7 +792,7 @@
 	    status != IB_WC_SUCCESS) {
 		struct ib_wc wc;
 
-		memset(&wc, 0, sizeof wc);
+		memset(&wc, 0, sizeof(wc));
 		wc.wr_id = wqe->wr.wr_id;
 		wc.status = status;
 		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 911205d..c72775f 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -259,6 +259,7 @@
 		 * it again during startup.
 		 */
 		u64 val;
+
 		rst_val &= ~(1ULL);
 		qib_write_kreg(dd, kr_hwerrmask,
 			       dd->cspec->hwerrmask &
@@ -590,6 +591,7 @@
 		 * Both should be clear
 		 */
 		u64 newval = 0;
+
 		qib_write_kreg(dd, acc, newval);
 		/* First read after write is not trustworthy */
 		pollval = qib_read_kreg32(dd, acc);
@@ -601,6 +603,7 @@
 		/* Need to claim */
 		u64 pollval;
 		u64 newval = EPB_ACC_REQ | oct_sel;
+
 		qib_write_kreg(dd, acc, newval);
 		/* First read after write is not trustworthy */
 		pollval = qib_read_kreg32(dd, acc);
@@ -812,6 +815,7 @@
 			if (!sofar) {
 				/* Only set address at start of chunk */
 				int addrbyte = (addr + sofar) >> 8;
+
 				transval = csbit | EPB_MADDRH | addrbyte;
 				tries = epb_trans(dd, trans, transval,
 						  &transval);
@@ -922,7 +926,7 @@
  * IRQ not set up at this point in init, so we poll.
  */
 #define IB_SERDES_TRIM_DONE (1ULL << 11)
-#define TRIM_TMO (30)
+#define TRIM_TMO (15)
 
 static int qib_sd_trimdone_poll(struct qib_devdata *dd)
 {
@@ -940,7 +944,7 @@
 			ret = 1;
 			break;
 		}
-		msleep(10);
+		msleep(20);
 	}
 	if (trim_tmo >= TRIM_TMO) {
 		qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
@@ -1071,6 +1075,7 @@
 		dds_reg_map >>= 4;
 		for (midx = 0; midx < DDS_ROWS; ++midx) {
 			u64 __iomem *daddr = taddr + ((midx << 4) + idx);
+
 			data = dds_init_vals[midx].reg_vals[idx];
 			writeq(data, daddr);
 			mmiowb();
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 3c8e4e3..81f56cd 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -586,8 +586,8 @@
 		container_of(device, struct qib_ibdev, ibdev.dev);
 	struct qib_devdata *dd = dd_from_dev(dev);
 
-	buf[sizeof dd->serial] = '\0';
-	memcpy(buf, dd->serial, sizeof dd->serial);
+	buf[sizeof(dd->serial)] = '\0';
+	memcpy(buf, dd->serial, sizeof(dd->serial));
 	strcat(buf, "\n");
 	return strlen(buf);
 }
@@ -611,28 +611,6 @@
 	return ret < 0 ? ret : count;
 }
 
-static ssize_t show_logged_errs(struct device *device,
-				struct device_attribute *attr, char *buf)
-{
-	struct qib_ibdev *dev =
-		container_of(device, struct qib_ibdev, ibdev.dev);
-	struct qib_devdata *dd = dd_from_dev(dev);
-	int idx, count;
-
-	/* force consistency with actual EEPROM */
-	if (qib_update_eeprom_log(dd) != 0)
-		return -ENXIO;
-
-	count = 0;
-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-		count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
-				   dd->eep_st_errs[idx],
-				   idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
-	}
-
-	return count;
-}
-
 /*
  * Dump tempsense regs. in decimal, to ease shell-scripts.
  */
@@ -679,7 +657,6 @@
 static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
 static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
 static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@@ -693,7 +670,6 @@
 	&dev_attr_nfreectxts,
 	&dev_attr_serial,
 	&dev_attr_boardversion,
-	&dev_attr_logged_errors,
 	&dev_attr_tempsense,
 	&dev_attr_localbus_info,
 	&dev_attr_chip_reset,
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
index 647f7be..f569866 100644
--- a/drivers/infiniband/hw/qib/qib_twsi.c
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -105,6 +105,7 @@
 		udelay(2);
 	else {
 		int rise_usec;
+
 		for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
 			if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
 				break;
@@ -326,6 +327,7 @@
 static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
 {
 	int ret = 1;
+
 	if (flags & QIB_TWSI_START)
 		start_seq(dd);
 
@@ -435,8 +437,7 @@
 	int sub_len;
 	const u8 *bp = buffer;
 	int max_wait_time, i;
-	int ret;
-	ret = 1;
+	int ret = 1;
 
 	while (len > 0) {
 		if (dev == QIB_TWSI_NO_DEV) {
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index 31d3561..eface3b 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -180,6 +180,7 @@
 
 	for (i = 0; i < cnt; i++) {
 		int which;
+
 		if (!test_bit(i, mask))
 			continue;
 		/*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index aaf7039..26243b7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -127,7 +127,7 @@
 	 * present on the wire.
 	 */
 	length = swqe->length;
-	memset(&wc, 0, sizeof wc);
+	memset(&wc, 0, sizeof(wc));
 	wc.byte_len = length + sizeof(struct ib_grh);
 
 	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index d2806ca..3e0677c 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -50,7 +50,7 @@
 /* expected size of headers (for dma_pool) */
 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
 /* attempt to drain the queue for 5secs */
-#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
 
 /*
  * track how many times a process open this driver.
@@ -226,6 +226,7 @@
 		sdma_rb_node->refcount++;
 	} else {
 		int ret;
+
 		sdma_rb_node = kmalloc(sizeof(
 			struct qib_user_sdma_rb_node), GFP_KERNEL);
 		if (!sdma_rb_node)
@@ -936,6 +937,7 @@
 
 			if (tiddma) {
 				char *tidsm = (char *)pkt + pktsize;
+
 				cfur = copy_from_user(tidsm,
 					iov[idx].iov_base, tidsmsize);
 				if (cfur) {
@@ -1142,7 +1144,7 @@
 		qib_user_sdma_hwqueue_clean(ppd);
 		qib_user_sdma_queue_clean(ppd, pq);
 		mutex_unlock(&pq->lock);
-		msleep(10);
+		msleep(20);
 	}
 
 	if (pq->num_pending || pq->num_sending) {
@@ -1316,8 +1318,6 @@
 
 	if (nfree && !list_empty(pktlist))
 		goto retry;
-
-	return;
 }
 
 /* pq->lock must be held, get packets on the wire... */
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 9bcfbd8..4a35998 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1342,6 +1342,7 @@
 done:
 	if (dd->flags & QIB_USE_SPCL_TRIG) {
 		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
 		qib_flush_wc();
 		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
 	}
@@ -1744,7 +1745,7 @@
 	 * we allow allocations of more than we report for this value.
 	 */
 
-	pd = kmalloc(sizeof *pd, GFP_KERNEL);
+	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
@@ -1829,7 +1830,7 @@
 		goto bail;
 	}
 
-	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+	ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
 	if (!ah) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
@@ -1862,7 +1863,7 @@
 	struct ib_ah *ah = ERR_PTR(-EINVAL);
 	struct qib_qp *qp0;
 
-	memset(&attr, 0, sizeof attr);
+	memset(&attr, 0, sizeof(attr));
 	attr.dlid = dlid;
 	attr.port_num = ppd_from_ibp(ibp)->port;
 	rcu_read_lock();
@@ -1977,7 +1978,7 @@
 	struct qib_ucontext *context;
 	struct ib_ucontext *ret;
 
-	context = kmalloc(sizeof *context, GFP_KERNEL);
+	context = kmalloc(sizeof(*context), GFP_KERNEL);
 	if (!context) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
@@ -2054,7 +2055,9 @@
 
 	dev->qp_table_size = ib_qib_qp_table_size;
 	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
-	dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
+	dev->qp_table = kmalloc_array(
+				dev->qp_table_size,
+				sizeof(*dev->qp_table),
 				GFP_KERNEL);
 	if (!dev->qp_table) {
 		ret = -ENOMEM;
@@ -2122,7 +2125,7 @@
 	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
 		struct qib_verbs_txreq *tx;
 
-		tx = kzalloc(sizeof *tx, GFP_KERNEL);
+		tx = kzalloc(sizeof(*tx), GFP_KERNEL);
 		if (!tx) {
 			ret = -ENOMEM;
 			goto err_tx;
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
index dabb697..f8ea069 100644
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -43,7 +43,7 @@
 {
 	struct qib_mcast_qp *mqp;
 
-	mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
+	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
 	if (!mqp)
 		goto bail;
 
@@ -75,7 +75,7 @@
 {
 	struct qib_mcast *mcast;
 
-	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+	mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
 	if (!mcast)
 		goto bail;
 
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 1d7281c..81b225f 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -72,6 +72,7 @@
 	if (dd->piobcnt2k && dd->piobcnt4k) {
 		/* 2 sizes for chip */
 		unsigned long pio2kbase, pio4kbase;
+
 		pio2kbase = dd->piobufbase & 0xffffffffUL;
 		pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
 		if (pio2kbase < pio4kbase) {
@@ -91,7 +92,7 @@
 	}
 
 	for (bits = 0; !(piolen & (1ULL << bits)); bits++)
-		/* do nothing */ ;
+		; /* do nothing */
 
 	if (piolen != (1ULL << bits)) {
 		piolen >>= bits;
@@ -100,8 +101,8 @@
 		piolen = 1ULL << (bits + 1);
 	}
 	if (pioaddr & (piolen - 1)) {
-		u64 atmp;
-		atmp = pioaddr & ~(piolen - 1);
+		u64 atmp = pioaddr & ~(piolen - 1);
+
 		if (atmp < addr || (atmp + piolen) > (addr + len)) {
 			qib_dev_err(dd,
 				"No way to align address/size (%llx/%llx), no WC mtrr\n",
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 5ce2681..b47aea1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -654,7 +654,9 @@
 			   enum dma_data_direction dma_dir);
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-			      struct iser_data_buf *data);
+			      struct iser_data_buf *data,
+			      enum dma_data_direction dir);
+
 int  iser_initialize_task_headers(struct iscsi_task *task,
 			struct iser_tx_desc *tx_desc);
 int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3821633..20e859a 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -320,9 +320,6 @@
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
 
-	if (!iser_conn->rx_descs)
-		goto free_login_buf;
-
 	if (device->iser_free_rdma_reg_res)
 		device->iser_free_rdma_reg_res(ib_conn);
 
@@ -334,7 +331,6 @@
 	/* make sure we never redo any unmapping */
 	iser_conn->rx_descs = NULL;
 
-free_login_buf:
 	iser_free_login_buf(iser_conn);
 }
 
@@ -714,19 +710,23 @@
 		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
 		if (is_rdma_data_aligned)
 			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->data[ISER_DIR_IN]);
+						 &iser_task->data[ISER_DIR_IN],
+						 DMA_FROM_DEVICE);
 		if (prot_count && is_rdma_prot_aligned)
 			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->prot[ISER_DIR_IN]);
+						 &iser_task->prot[ISER_DIR_IN],
+						 DMA_FROM_DEVICE);
 	}
 
 	if (iser_task->dir[ISER_DIR_OUT]) {
 		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
 		if (is_rdma_data_aligned)
 			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->data[ISER_DIR_OUT]);
+						 &iser_task->data[ISER_DIR_OUT],
+						 DMA_TO_DEVICE);
 		if (prot_count && is_rdma_prot_aligned)
 			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->prot[ISER_DIR_OUT]);
+						 &iser_task->prot[ISER_DIR_OUT],
+						 DMA_TO_DEVICE);
 	}
 }
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index abce933..341040b 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -332,12 +332,13 @@
 }
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-			      struct iser_data_buf *data)
+			      struct iser_data_buf *data,
+			      enum dma_data_direction dir)
 {
 	struct ib_device *dev;
 
 	dev = iser_task->iser_conn->ib_conn.device->ib_device;
-	ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+	ib_dma_unmap_sg(dev, data->buf, data->size, dir);
 }
 
 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
@@ -357,7 +358,9 @@
 		iser_data_buf_dump(mem, ibdev);
 
 	/* unmap the command data before accessing it */
-	iser_dma_unmap_task_data(iser_task, mem);
+	iser_dma_unmap_task_data(iser_task, mem,
+				 (cmd_dir == ISER_DIR_OUT) ?
+				 DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
 	/* allocate copy buf, if we are writing, copy the */
 	/* unaligned scatterlist, dma map the copy        */
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 695a270..4065abe 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -600,16 +600,16 @@
 /**
  * iser_free_ib_conn_res - release IB related resources
  * @iser_conn: iser connection struct
- * @destroy_device: indicator if we need to try to release
- *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
- *     will use this.
+ * @destroy: indicator if we need to try to release the
+ *     iser device and memory regions pool (only iscsi
+ *     shutdown and DEVICE_REMOVAL will use this).
  *
  * This routine is called with the iser state mutex held
  * so the cm_id removal is out of here. It is Safe to
  * be invoked multiple times.
  */
 static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
-				  bool destroy_device)
+				  bool destroy)
 {
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
@@ -617,17 +617,20 @@
 	iser_info("freeing conn %p cma_id %p qp %p\n",
 		  iser_conn, ib_conn->cma_id, ib_conn->qp);
 
-	iser_free_rx_descriptors(iser_conn);
-
 	if (ib_conn->qp != NULL) {
 		ib_conn->comp->active_qps--;
 		rdma_destroy_qp(ib_conn->cma_id);
 		ib_conn->qp = NULL;
 	}
 
-	if (destroy_device && device != NULL) {
-		iser_device_try_release(device);
-		ib_conn->device = NULL;
+	if (destroy) {
+		if (iser_conn->rx_descs)
+			iser_free_rx_descriptors(iser_conn);
+
+		if (device != NULL) {
+			iser_device_try_release(device);
+			ib_conn->device = NULL;
+		}
 	}
 }
 
@@ -643,9 +646,11 @@
 	mutex_unlock(&ig.connlist_mutex);
 
 	mutex_lock(&iser_conn->state_mutex);
+	/* In case we end up here without ep_disconnect being invoked. */
 	if (iser_conn->state != ISER_CONN_DOWN) {
 		iser_warn("iser conn %p state %d, expected state down.\n",
 			  iser_conn, iser_conn->state);
+		iscsi_destroy_endpoint(iser_conn->ep);
 		iser_conn->state = ISER_CONN_DOWN;
 	}
 	/*
@@ -840,7 +845,7 @@
 }
 
 static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
-				 bool destroy_device)
+				 bool destroy)
 {
 	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
 
@@ -850,7 +855,7 @@
 	 * and flush errors.
 	 */
 	iser_disconnected_handler(cma_id);
-	iser_free_ib_conn_res(iser_conn, destroy_device);
+	iser_free_ib_conn_res(iser_conn, destroy);
 	complete(&iser_conn->ib_completion);
 };
 
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dafb3c5..075b19c 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -38,7 +38,7 @@
 #define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
 				 ISERT_MAX_CONN)
 
-int isert_debug_level = 0;
+static int isert_debug_level;
 module_param_named(debug_level, isert_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
 
@@ -949,7 +949,7 @@
 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
 		isert_conn->post_recv_buf_count -= count;
 	} else {
-		isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
+		isert_dbg("Posted %d RX buffers\n", count);
 		isert_conn->conn_rx_desc_head = rx_head;
 	}
 	return ret;
@@ -1351,17 +1351,19 @@
 	struct iscsi_conn *conn = isert_conn->conn;
 	u32 payload_length = ntoh24(hdr->dlength);
 	int rc;
-	unsigned char *text_in;
+	unsigned char *text_in = NULL;
 
 	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
 	if (rc < 0)
 		return rc;
 
-	text_in = kzalloc(payload_length, GFP_KERNEL);
-	if (!text_in) {
-		isert_err("Unable to allocate text_in of payload_length: %u\n",
-			  payload_length);
-		return -ENOMEM;
+	if (payload_length) {
+		text_in = kzalloc(payload_length, GFP_KERNEL);
+		if (!text_in) {
+			isert_err("Unable to allocate text_in of payload_length: %u\n",
+				  payload_length);
+			return -ENOMEM;
+		}
 	}
 	cmd->text_in_ptr = text_in;
 
@@ -1434,9 +1436,15 @@
 		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = isert_allocate_cmd(conn);
-		if (!cmd)
-			break;
+		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
+			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+			if (!cmd)
+				break;
+		} else {
+			cmd = isert_allocate_cmd(conn);
+			if (!cmd)
+				break;
+		}
 
 		isert_cmd = iscsit_priv_cmd(cmd);
 		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1658,6 +1666,7 @@
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct isert_device *device = isert_conn->conn_device;
+	struct iscsi_text_rsp *hdr;
 
 	isert_dbg("Cmd %p\n", isert_cmd);
 
@@ -1698,6 +1707,11 @@
 	case ISCSI_OP_REJECT:
 	case ISCSI_OP_NOOP_OUT:
 	case ISCSI_OP_TEXT:
+		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
+		/* If the continue bit is on, keep the command alive */
+		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
+			break;
+
 		spin_lock_bh(&conn->cmd_lock);
 		if (!list_empty(&cmd->i_conn_node))
 			list_del_init(&cmd->i_conn_node);
@@ -1709,8 +1723,7 @@
 		 * associated cmd->se_cmd needs to be released.
 		 */
 		if (cmd->se_cmd.se_tfo != NULL) {
-			isert_dbg("Calling transport_generic_free_cmd from"
-				 " isert_put_cmd for 0x%02x\n",
+			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
 				 cmd->iscsi_opcode);
 			transport_generic_free_cmd(&cmd->se_cmd, 0);
 			break;
@@ -2275,7 +2288,7 @@
 	}
 	isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-	isert_dbg("conn %p Text Reject\n", isert_conn);
+	isert_dbg("conn %p Text Response\n", isert_conn);
 
 	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -3136,7 +3149,7 @@
 	spin_lock_bh(&np->np_thread_lock);
 	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
 		spin_unlock_bh(&np->np_thread_lock);
-		isert_dbg("np_thread_state %d for isert_accept_np\n",
+		isert_dbg("np_thread_state %d\n",
 			 np->np_thread_state);
 		/**
 		 * No point in stalling here when np_thread
@@ -3320,7 +3333,8 @@
 {
 	int ret;
 
-	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+	isert_comp_wq = alloc_workqueue("isert_comp_wq",
+					WQ_UNBOUND | WQ_HIGHPRI, 0);
 	if (!isert_comp_wq) {
 		isert_err("Unable to allocate isert_comp_wq\n");
 		ret = -ENOMEM;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index eb694dd..6e0a477 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3518,7 +3518,7 @@
 	DECLARE_COMPLETION_ONSTACK(release_done);
 	struct srpt_rdma_ch *ch;
 	struct srpt_device *sdev;
-	int res;
+	unsigned long res;
 
 	ch = se_sess->fabric_sess_ptr;
 	WARN_ON(ch->sess != se_sess);
@@ -3533,7 +3533,7 @@
 	spin_unlock_irq(&sdev->spinlock);
 
 	res = wait_for_completion_timeout(&release_done, 60 * HZ);
-	WARN_ON(res <= 0);
+	WARN_ON(res == 0);
 }
 
 /**
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index b784257..d09cefa 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -535,8 +535,7 @@
 		}
 	}
  fail2:	for (i = 0; i < 2; i++)
-		if (port->adi[i].dev)
-			input_free_device(port->adi[i].dev);
+		input_free_device(port->adi[i].dev);
 	gameport_close(gameport);
  fail1:	gameport_set_drvdata(gameport, NULL);
 	kfree(port);
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index a89488a..fcef5d1 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -345,13 +345,11 @@
 {
 	const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
 	struct input_dev *input_dev = keypad->input_dev;
-	const struct matrix_keymap_data *keymap_data =
-				pdata ? pdata->matrix_keymap_data : NULL;
 	unsigned short keycode;
 	int i;
 	int error;
 
-	error = matrix_keypad_build_keymap(keymap_data, NULL,
+	error = matrix_keypad_build_keymap(pdata->matrix_keymap_data, NULL,
 					   pdata->matrix_key_rows,
 					   pdata->matrix_key_cols,
 					   keypad->keycodes, input_dev);
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 8ff612d..5639325 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -411,9 +411,9 @@
 
 	input_set_drvdata(input, keypad);
 
-	error = request_threaded_irq(irq, NULL,
-			tc3589x_keypad_irq, plat->irqtype,
-			"tc3589x-keypad", keypad);
+	error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
+				     plat->irqtype | IRQF_ONESHOT,
+				     "tc3589x-keypad", keypad);
 	if (error < 0) {
 		dev_err(&pdev->dev,
 				"Could not allocate irq %d,error %d\n",
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 3f43515..a0fc18f 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -7,29 +7,37 @@
 
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/input.h>
 #include <linux/slab.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 #include <asm/portmux.h>
-#include <asm/bfin_rotary.h>
 
-static const u16 per_cnt[] = {
-	P_CNT_CUD,
-	P_CNT_CDG,
-	P_CNT_CZM,
-	0
-};
+#define CNT_CONFIG_OFF		0	/* CNT Config Offset */
+#define CNT_IMASK_OFF		4	/* CNT Interrupt Mask Offset */
+#define CNT_STATUS_OFF		8	/* CNT Status Offset */
+#define CNT_COMMAND_OFF		12	/* CNT Command Offset */
+#define CNT_DEBOUNCE_OFF	16	/* CNT Debounce Offset */
+#define CNT_COUNTER_OFF		20	/* CNT Counter Offset */
+#define CNT_MAX_OFF		24	/* CNT Maximum Count Offset */
+#define CNT_MIN_OFF		28	/* CNT Minimum Count Offset */
 
 struct bfin_rot {
 	struct input_dev *input;
+	void __iomem *base;
 	int irq;
 	unsigned int up_key;
 	unsigned int down_key;
 	unsigned int button_key;
 	unsigned int rel_code;
+
+	unsigned short mode;
+	unsigned short debounce;
+
 	unsigned short cnt_config;
 	unsigned short cnt_imask;
 	unsigned short cnt_debounce;
@@ -59,18 +67,17 @@
 
 static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
 {
-	struct platform_device *pdev = dev_id;
-	struct bfin_rot *rotary = platform_get_drvdata(pdev);
+	struct bfin_rot *rotary = dev_id;
 	int delta;
 
-	switch (bfin_read_CNT_STATUS()) {
+	switch (readw(rotary->base + CNT_STATUS_OFF)) {
 
 	case ICII:
 		break;
 
 	case UCII:
 	case DCII:
-		delta = bfin_read_CNT_COUNTER();
+		delta = readl(rotary->base + CNT_COUNTER_OFF);
 		if (delta)
 			report_rotary_event(rotary, delta);
 		break;
@@ -83,16 +90,52 @@
 		break;
 	}
 
-	bfin_write_CNT_COMMAND(W1LCNT_ZERO);	/* Clear COUNTER */
-	bfin_write_CNT_STATUS(-1);	/* Clear STATUS */
+	writew(W1LCNT_ZERO, rotary->base + CNT_COMMAND_OFF); /* Clear COUNTER */
+	writew(-1, rotary->base + CNT_STATUS_OFF); /* Clear STATUS */
 
 	return IRQ_HANDLED;
 }
 
+static int bfin_rotary_open(struct input_dev *input)
+{
+	struct bfin_rot *rotary = input_get_drvdata(input);
+	unsigned short val;
+
+	if (rotary->mode & ROT_DEBE)
+		writew(rotary->debounce & DPRESCALE,
+			rotary->base + CNT_DEBOUNCE_OFF);
+
+	writew(rotary->mode & ~CNTE, rotary->base + CNT_CONFIG_OFF);
+
+	val = UCIE | DCIE;
+	if (rotary->button_key)
+		val |= CZMIE;
+	writew(val, rotary->base + CNT_IMASK_OFF);
+
+	writew(rotary->mode | CNTE, rotary->base + CNT_CONFIG_OFF);
+
+	return 0;
+}
+
+static void bfin_rotary_close(struct input_dev *input)
+{
+	struct bfin_rot *rotary = input_get_drvdata(input);
+
+	writew(0, rotary->base + CNT_CONFIG_OFF);
+	writew(0, rotary->base + CNT_IMASK_OFF);
+}
+
+static void bfin_rotary_free_action(void *data)
+{
+	peripheral_free_list(data);
+}
+
 static int bfin_rotary_probe(struct platform_device *pdev)
 {
-	struct bfin_rotary_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct device *dev = &pdev->dev;
+	const struct bfin_rotary_platform_data *pdata = dev_get_platdata(dev);
 	struct bfin_rot *rotary;
+	struct resource *res;
 	struct input_dev *input;
 	int error;
 
@@ -102,18 +145,37 @@
 		return -EINVAL;
 	}
 
-	error = peripheral_request_list(per_cnt, dev_name(&pdev->dev));
-	if (error) {
-		dev_err(&pdev->dev, "requesting peripherals failed\n");
-		return error;
+	if (pdata->pin_list) {
+		error = peripheral_request_list(pdata->pin_list,
+						dev_name(&pdev->dev));
+		if (error) {
+			dev_err(dev, "requesting peripherals failed: %d\n",
+				error);
+			return error;
+		}
+
+		error = devm_add_action(dev, bfin_rotary_free_action,
+					pdata->pin_list);
+		if (error) {
+			dev_err(dev, "setting cleanup action failed: %d\n",
+				error);
+			peripheral_free_list(pdata->pin_list);
+			return error;
+		}
 	}
 
-	rotary = kzalloc(sizeof(struct bfin_rot), GFP_KERNEL);
-	input = input_allocate_device();
-	if (!rotary || !input) {
-		error = -ENOMEM;
-		goto out1;
-	}
+	rotary = devm_kzalloc(dev, sizeof(struct bfin_rot), GFP_KERNEL);
+	if (!rotary)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rotary->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(rotary->base))
+		return PTR_ERR(rotary->base);
+
+	input = devm_input_allocate_device(dev);
+	if (!input)
+		return -ENOMEM;
 
 	rotary->input = input;
 
@@ -122,9 +184,8 @@
 	rotary->button_key = pdata->rotary_button_key;
 	rotary->rel_code = pdata->rotary_rel_code;
 
-	error = rotary->irq = platform_get_irq(pdev, 0);
-	if (error < 0)
-		goto out1;
+	rotary->mode = pdata->mode;
+	rotary->debounce = pdata->debounce;
 
 	input->name = pdev->name;
 	input->phys = "bfin-rotary/input0";
@@ -137,6 +198,9 @@
 	input->id.product = 0x0001;
 	input->id.version = 0x0100;
 
+	input->open = bfin_rotary_open;
+	input->close = bfin_rotary_close;
+
 	if (rotary->up_key) {
 		__set_bit(EV_KEY, input->evbit);
 		__set_bit(rotary->up_key, input->keybit);
@@ -151,75 +215,43 @@
 		__set_bit(rotary->button_key, input->keybit);
 	}
 
-	error = request_irq(rotary->irq, bfin_rotary_isr,
-			    0, dev_name(&pdev->dev), pdev);
+	/* Quiesce the device before requesting irq */
+	bfin_rotary_close(input);
+
+	rotary->irq = platform_get_irq(pdev, 0);
+	if (rotary->irq < 0) {
+		dev_err(dev, "No rotary IRQ specified\n");
+		return -ENOENT;
+	}
+
+	error = devm_request_irq(dev, rotary->irq, bfin_rotary_isr,
+				 0, dev_name(dev), rotary);
 	if (error) {
-		dev_err(&pdev->dev,
-			"unable to claim irq %d; error %d\n",
+		dev_err(dev, "unable to claim irq %d; error %d\n",
 			rotary->irq, error);
-		goto out1;
+		return error;
 	}
 
 	error = input_register_device(input);
 	if (error) {
-		dev_err(&pdev->dev,
-			"unable to register input device (%d)\n", error);
-		goto out2;
+		dev_err(dev, "unable to register input device (%d)\n", error);
+		return error;
 	}
 
-	if (pdata->rotary_button_key)
-		bfin_write_CNT_IMASK(CZMIE);
-
-	if (pdata->mode & ROT_DEBE)
-		bfin_write_CNT_DEBOUNCE(pdata->debounce & DPRESCALE);
-
-	if (pdata->mode)
-		bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() |
-					(pdata->mode & ~CNTE));
-
-	bfin_write_CNT_IMASK(bfin_read_CNT_IMASK() | UCIE | DCIE);
-	bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | CNTE);
-
 	platform_set_drvdata(pdev, rotary);
 	device_init_wakeup(&pdev->dev, 1);
 
 	return 0;
-
-out2:
-	free_irq(rotary->irq, pdev);
-out1:
-	input_free_device(input);
-	kfree(rotary);
-	peripheral_free_list(per_cnt);
-
-	return error;
 }
 
-static int bfin_rotary_remove(struct platform_device *pdev)
-{
-	struct bfin_rot *rotary = platform_get_drvdata(pdev);
-
-	bfin_write_CNT_CONFIG(0);
-	bfin_write_CNT_IMASK(0);
-
-	free_irq(rotary->irq, pdev);
-	input_unregister_device(rotary->input);
-	peripheral_free_list(per_cnt);
-
-	kfree(rotary);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int bfin_rotary_suspend(struct device *dev)
+static int __maybe_unused bfin_rotary_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct bfin_rot *rotary = platform_get_drvdata(pdev);
 
-	rotary->cnt_config = bfin_read_CNT_CONFIG();
-	rotary->cnt_imask = bfin_read_CNT_IMASK();
-	rotary->cnt_debounce = bfin_read_CNT_DEBOUNCE();
+	rotary->cnt_config = readw(rotary->base + CNT_CONFIG_OFF);
+	rotary->cnt_imask = readw(rotary->base + CNT_IMASK_OFF);
+	rotary->cnt_debounce = readw(rotary->base + CNT_DEBOUNCE_OFF);
 
 	if (device_may_wakeup(&pdev->dev))
 		enable_irq_wake(rotary->irq);
@@ -227,38 +259,32 @@
 	return 0;
 }
 
-static int bfin_rotary_resume(struct device *dev)
+static int __maybe_unused bfin_rotary_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct bfin_rot *rotary = platform_get_drvdata(pdev);
 
-	bfin_write_CNT_DEBOUNCE(rotary->cnt_debounce);
-	bfin_write_CNT_IMASK(rotary->cnt_imask);
-	bfin_write_CNT_CONFIG(rotary->cnt_config & ~CNTE);
+	writew(rotary->cnt_debounce, rotary->base + CNT_DEBOUNCE_OFF);
+	writew(rotary->cnt_imask, rotary->base + CNT_IMASK_OFF);
+	writew(rotary->cnt_config & ~CNTE, rotary->base + CNT_CONFIG_OFF);
 
 	if (device_may_wakeup(&pdev->dev))
 		disable_irq_wake(rotary->irq);
 
 	if (rotary->cnt_config & CNTE)
-		bfin_write_CNT_CONFIG(rotary->cnt_config);
+		writew(rotary->cnt_config, rotary->base + CNT_CONFIG_OFF);
 
 	return 0;
 }
 
-static const struct dev_pm_ops bfin_rotary_pm_ops = {
-	.suspend	= bfin_rotary_suspend,
-	.resume		= bfin_rotary_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(bfin_rotary_pm_ops,
+			 bfin_rotary_suspend, bfin_rotary_resume);
 
 static struct platform_driver bfin_rotary_device_driver = {
 	.probe		= bfin_rotary_probe,
-	.remove		= bfin_rotary_remove,
 	.driver		= {
 		.name	= "bfin-rotary",
-#ifdef CONFIG_PM
 		.pm	= &bfin_rotary_pm_ops,
-#endif
 	},
 };
 module_platform_driver(bfin_rotary_device_driver);
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 59d4dcd..9822877 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -187,6 +187,7 @@
 	idev->private		= m;
 	idev->input->name	= MMA8450_DRV_NAME;
 	idev->input->id.bustype	= BUS_I2C;
+	idev->input->dev.parent = &c->dev;
 	idev->poll		= mma8450_poll;
 	idev->poll_interval	= POLL_INTERVAL;
 	idev->poll_interval_max	= POLL_INTERVAL_MAX;
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 79cc0f7..e8e010a 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -195,7 +195,7 @@
 
 static struct soc_button_info soc_button_PNP0C40[] = {
 	{ "power", 0, EV_KEY, KEY_POWER, false, true },
-	{ "home", 1, EV_KEY, KEY_HOME, false, true },
+	{ "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
 	{ "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
 	{ "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
 	{ "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index f205b8b..1bd15eb 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -99,36 +99,58 @@
 #define ALPS_FOUR_BUTTONS	0x40	/* 4 direction button present */
 #define ALPS_PS2_INTERLEAVED	0x80	/* 3-byte PS/2 packet interleaved with
 					   6-byte ALPS packet */
-#define ALPS_IS_RUSHMORE	0x100	/* device is a rushmore */
 #define ALPS_BUTTONPAD		0x200	/* device is a clickpad */
 
 static const struct alps_model_info alps_model_data[] = {
-	{ { 0x32, 0x02, 0x14 },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },	/* Toshiba Salellite Pro M10 */
-	{ { 0x33, 0x02, 0x0a },	0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 },				/* UMAX-530T */
-	{ { 0x53, 0x02, 0x0a },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-	{ { 0x53, 0x02, 0x14 },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-	{ { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },				/* HP ze1115 */
-	{ { 0x63, 0x02, 0x0a },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-	{ { 0x63, 0x02, 0x14 },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-	{ { 0x63, 0x02, 0x28 },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 },		/* Fujitsu Siemens S6010 */
-	{ { 0x63, 0x02, 0x3c },	0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL },			/* Toshiba Satellite S2400-103 */
-	{ { 0x63, 0x02, 0x50 },	0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 },		/* NEC Versa L320 */
-	{ { 0x63, 0x02, 0x64 },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-	{ { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },	/* Dell Latitude D800 */
-	{ { 0x73, 0x00, 0x0a },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT },		/* ThinkPad R61 8918-5QG */
-	{ { 0x73, 0x02, 0x0a },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-	{ { 0x73, 0x02, 0x14 },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 },		/* Ahtec Laptop */
-	{ { 0x20, 0x02, 0x0e },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },	/* XXX */
-	{ { 0x22, 0x02, 0x0a },	0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
-	{ { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT },	/* Dell Latitude D600 */
+	{ { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },	/* Toshiba Salellite Pro M10 */
+	{ { 0x33, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V1, 0x88, 0xf8, 0 } },				/* UMAX-530T */
+	{ { 0x53, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+	{ { 0x53, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+	{ { 0x60, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },				/* HP ze1115 */
+	{ { 0x63, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+	{ { 0x63, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+	{ { 0x63, 0x02, 0x28 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } },		/* Fujitsu Siemens S6010 */
+	{ { 0x63, 0x02, 0x3c }, 0x00, { ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL } },		/* Toshiba Satellite S2400-103 */
+	{ { 0x63, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 } },		/* NEC Versa L320 */
+	{ { 0x63, 0x02, 0x64 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+	{ { 0x63, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },	/* Dell Latitude D800 */
+	{ { 0x73, 0x00, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT } },		/* ThinkPad R61 8918-5QG */
+	{ { 0x73, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+	{ { 0x73, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } },		/* Ahtec Laptop */
+
+	/*
+	 * XXX This entry is suspicious. First byte has zero lower nibble,
+	 * which is what a normal mouse would report. Also, the value 0x0e
+	 * isn't valid per PS/2 spec.
+	 */
+	{ { 0x20, 0x02, 0x0e }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
+
+	{ { 0x22, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
+	{ { 0x22, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT } },	/* Dell Latitude D600 */
 	/* Dell Latitude E5500, E6400, E6500, Precision M4400 */
-	{ { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
-		ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
-	{ { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT },		/* Dell XT2 */
-	{ { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },		/* Dell Vostro 1400 */
-	{ { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
-		ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },				/* Toshiba Tecra A11-11L */
-	{ { 0x73, 0x02, 0x64 },	0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
+	{ { 0x62, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf,
+		ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } },
+	{ { 0x73, 0x00, 0x14 }, 0x00, { ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT } },		/* Dell XT2 */
+	{ { 0x73, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS } },	/* Dell Vostro 1400 */
+	{ { 0x52, 0x01, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff,
+		ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } },				/* Toshiba Tecra A11-11L */
+	{ { 0x73, 0x02, 0x64 }, 0x8a, { ALPS_PROTO_V4, 0x8f, 0x8f, 0 } },
+};
+
+static const struct alps_protocol_info alps_v3_protocol_data = {
+	ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT
+};
+
+static const struct alps_protocol_info alps_v3_rushmore_data = {
+	ALPS_PROTO_V3_RUSHMORE, 0x8f, 0x8f, ALPS_DUALPOINT
+};
+
+static const struct alps_protocol_info alps_v5_protocol_data = {
+	ALPS_PROTO_V5, 0xc8, 0xd8, 0
+};
+
+static const struct alps_protocol_info alps_v7_protocol_data = {
+	ALPS_PROTO_V7, 0x48, 0x48, ALPS_DUALPOINT
 };
 
 static void alps_set_abs_params_st(struct alps_data *priv,
@@ -136,12 +158,6 @@
 static void alps_set_abs_params_mt(struct alps_data *priv,
 				   struct input_dev *dev1);
 
-/*
- * XXX - this entry is suspicious. First byte has zero lower nibble,
- * which is what a normal mouse would report. Also, the value 0x0e
- * isn't valid per PS/2 spec.
- */
-
 /* Packet formats are described in Documentation/input/alps.txt */
 
 static bool alps_is_valid_first_byte(struct alps_data *priv,
@@ -150,8 +166,7 @@
 	return (data & priv->mask0) == priv->byte0;
 }
 
-static void alps_report_buttons(struct psmouse *psmouse,
-				struct input_dev *dev1, struct input_dev *dev2,
+static void alps_report_buttons(struct input_dev *dev1, struct input_dev *dev2,
 				int left, int right, int middle)
 {
 	struct input_dev *dev;
@@ -161,20 +176,21 @@
 	 * other device (dev2) then this event should be also
 	 * sent through that device.
 	 */
-	dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1;
+	dev = (dev2 && test_bit(BTN_LEFT, dev2->key)) ? dev2 : dev1;
 	input_report_key(dev, BTN_LEFT, left);
 
-	dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1;
+	dev = (dev2 && test_bit(BTN_RIGHT, dev2->key)) ? dev2 : dev1;
 	input_report_key(dev, BTN_RIGHT, right);
 
-	dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1;
+	dev = (dev2 && test_bit(BTN_MIDDLE, dev2->key)) ? dev2 : dev1;
 	input_report_key(dev, BTN_MIDDLE, middle);
 
 	/*
 	 * Sync the _other_ device now, we'll do the first
 	 * device later once we report the rest of the events.
 	 */
-	input_sync(dev2);
+	if (dev2)
+		input_sync(dev2);
 }
 
 static void alps_process_packet_v1_v2(struct psmouse *psmouse)
@@ -221,13 +237,13 @@
 		input_report_rel(dev2, REL_X,  (x > 383 ? (x - 768) : x));
 		input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
 
-		alps_report_buttons(psmouse, dev2, dev, left, right, middle);
+		alps_report_buttons(dev2, dev, left, right, middle);
 
 		input_sync(dev2);
 		return;
 	}
 
-	alps_report_buttons(psmouse, dev, dev2, left, right, middle);
+	alps_report_buttons(dev, dev2, left, right, middle);
 
 	/* Convert hardware tap to a reasonable Z value */
 	if (ges && !fin)
@@ -412,7 +428,7 @@
 		(2 * (priv->y_bits - 1));
 
 	/* y-bitmap order is reversed, except on rushmore */
-	if (!(priv->flags & ALPS_IS_RUSHMORE)) {
+	if (priv->proto_version != ALPS_PROTO_V3_RUSHMORE) {
 		fields->mt[0].y = priv->y_max - fields->mt[0].y;
 		fields->mt[1].y = priv->y_max - fields->mt[1].y;
 	}
@@ -648,7 +664,8 @@
 		 */
 		if (f->is_mp) {
 			fingers = f->fingers;
-			if (priv->proto_version == ALPS_PROTO_V3) {
+			if (priv->proto_version == ALPS_PROTO_V3 ||
+			    priv->proto_version == ALPS_PROTO_V3_RUSHMORE) {
 				if (alps_process_bitmap(priv, f) == 0)
 					fingers = 0; /* Use st data */
 
@@ -892,34 +909,6 @@
 					  unsigned char *pkt,
 					  unsigned char pkt_id)
 {
-	/*
-	 *       packet-fmt    b7   b6    b5   b4   b3   b2   b1   b0
-	 * Byte0 TWO & MULTI    L    1     R    M    1 Y0-2 Y0-1 Y0-0
-	 * Byte0 NEW            L    1  X1-5    1    1 Y0-2 Y0-1 Y0-0
-	 * Byte1            Y0-10 Y0-9  Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
-	 * Byte2            X0-11    1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
-	 * Byte3            X1-11    1  X0-4 X0-3    1 X0-2 X0-1 X0-0
-	 * Byte4 TWO        X1-10  TWO  X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
-	 * Byte4 MULTI      X1-10  TWO  X1-9 X1-8 X1-7 X1-6 Y1-5    1
-	 * Byte4 NEW        X1-10  TWO  X1-9 X1-8 X1-7 X1-6    0    0
-	 * Byte5 TWO & NEW  Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
-	 * Byte5 MULTI      Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6  F-1  F-0
-	 * L:         Left button
-	 * R / M:     Non-clickpads: Right / Middle button
-	 *            Clickpads: When > 2 fingers are down, and some fingers
-	 *            are in the button area, then the 2 coordinates reported
-	 *            are for fingers outside the button area and these report
-	 *            extra fingers being present in the right / left button
-	 *            area. Note these fingers are not added to the F field!
-	 *            so if a TWO packet is received and R = 1 then there are
-	 *            3 fingers down, etc.
-	 * TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
-	 *            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
-	 *               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
-	 *               in NEW fmt
-	 * F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
-	 */
-
 	mt[0].x = ((pkt[2] & 0x80) << 4);
 	mt[0].x |= ((pkt[2] & 0x3F) << 5);
 	mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -1044,17 +1033,6 @@
 		return;
 	}
 
-	/*
-	 *        b7 b6 b5 b4 b3 b2 b1 b0
-	 * Byte0   0  1  0  0  1  0  0  0
-	 * Byte1   1  1  *  *  1  M  R  L
-	 * Byte2  X7  1 X5 X4 X3 X2 X1 X0
-	 * Byte3  Z6  1 Y6 X6  1 Y2 Y1 Y0
-	 * Byte4  Y7  0 Y5 Y4 Y3  1  1  0
-	 * Byte5 T&P  0 Z5 Z4 Z3 Z2 Z1 Z0
-	 * M / R / L: Middle / Right / Left button
-	 */
-
 	x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2);
 	y = (packet[3] & 0x07) | (packet[4] & 0xb8) |
 	    ((packet[3] & 0x20) << 1);
@@ -1107,23 +1085,89 @@
 		alps_process_touchpad_packet_v7(psmouse);
 }
 
-static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+static DEFINE_MUTEX(alps_mutex);
+
+static void alps_register_bare_ps2_mouse(struct work_struct *work)
+{
+	struct alps_data *priv =
+		container_of(work, struct alps_data, dev3_register_work.work);
+	struct psmouse *psmouse = priv->psmouse;
+	struct input_dev *dev3;
+	int error = 0;
+
+	mutex_lock(&alps_mutex);
+
+	if (priv->dev3)
+		goto out;
+
+	dev3 = input_allocate_device();
+	if (!dev3) {
+		psmouse_err(psmouse, "failed to allocate secondary device\n");
+		error = -ENOMEM;
+		goto out;
+	}
+
+	snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s",
+		 psmouse->ps2dev.serio->phys,
+		 (priv->dev2 ? "input2" : "input1"));
+	dev3->phys = priv->phys3;
+
+	/*
+	 * format of input device name is: "protocol vendor name"
+	 * see function psmouse_switch_protocol() in psmouse-base.c
+	 */
+	dev3->name = "PS/2 ALPS Mouse";
+
+	dev3->id.bustype = BUS_I8042;
+	dev3->id.vendor  = 0x0002;
+	dev3->id.product = PSMOUSE_PS2;
+	dev3->id.version = 0x0000;
+	dev3->dev.parent = &psmouse->ps2dev.serio->dev;
+
+	input_set_capability(dev3, EV_REL, REL_X);
+	input_set_capability(dev3, EV_REL, REL_Y);
+	input_set_capability(dev3, EV_KEY, BTN_LEFT);
+	input_set_capability(dev3, EV_KEY, BTN_RIGHT);
+	input_set_capability(dev3, EV_KEY, BTN_MIDDLE);
+
+	__set_bit(INPUT_PROP_POINTER, dev3->propbit);
+
+	error = input_register_device(dev3);
+	if (error) {
+		psmouse_err(psmouse,
+			    "failed to register secondary device: %d\n",
+			    error);
+		input_free_device(dev3);
+		goto out;
+	}
+
+	priv->dev3 = dev3;
+
+out:
+	/*
+	 * Save the error code so that we can detect that we
+	 * already tried to create the device.
+	 */
+	if (error)
+		priv->dev3 = ERR_PTR(error);
+
+	mutex_unlock(&alps_mutex);
+}
+
+static void alps_report_bare_ps2_packet(struct input_dev *dev,
 					unsigned char packet[],
 					bool report_buttons)
 {
-	struct alps_data *priv = psmouse->private;
-	struct input_dev *dev2 = priv->dev2;
-
 	if (report_buttons)
-		alps_report_buttons(psmouse, dev2, psmouse->dev,
+		alps_report_buttons(dev, NULL,
 				packet[0] & 1, packet[0] & 2, packet[0] & 4);
 
-	input_report_rel(dev2, REL_X,
+	input_report_rel(dev, REL_X,
 		packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
-	input_report_rel(dev2, REL_Y,
+	input_report_rel(dev, REL_Y,
 		packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
 
-	input_sync(dev2);
+	input_sync(dev);
 }
 
 static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
@@ -1188,8 +1232,8 @@
 		 * de-synchronization.
 		 */
 
-		alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
-					    false);
+		alps_report_bare_ps2_packet(priv->dev2,
+					    &psmouse->packet[3], false);
 
 		/*
 		 * Continue with the standard ALPS protocol handling,
@@ -1245,9 +1289,18 @@
 	 * properly we only do this if the device is fully synchronized.
 	 */
 	if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
+
+		/* Register dev3 mouse on the first bare PS/2 packet */
+		if (unlikely(!priv->dev3))
+			psmouse_queue_work(psmouse,
+					   &priv->dev3_register_work, 0);
+
 		if (psmouse->pktcnt == 3) {
-			alps_report_bare_ps2_packet(psmouse, psmouse->packet,
-						    true);
+			/* Report data only once dev3 is registered */
+			if (likely(!IS_ERR_OR_NULL(priv->dev3)))
+				alps_report_bare_ps2_packet(priv->dev3,
+							    psmouse->packet,
+							    true);
 			return PSMOUSE_FULL_PACKET;
 		}
 		return PSMOUSE_GOOD_DATA;
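
The dev3 handling above combines two common kernel idioms: registration is
deferred to a workqueue the first time a bare PS/2 packet shows up (input
devices cannot be registered from the byte-handler path), and priv->dev3
doubles as a tri-state sentinel: NULL while registration has never been
attempted, a valid pointer on success, and an ERR_PTR()-encoded errno on
failure, so a single IS_ERR_OR_NULL() check gates the reporting path and the
worker never retries a failed registration. A minimal stand-alone sketch of
the sentinel idiom (simplified illustration, not the driver code):

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/input.h>

	/* NULL = never tried, ERR_PTR(-E...) = tried and failed, else usable */
	static struct input_dev *lazy_dev;

	static void lazy_register(void)
	{
		struct input_dev *d;
		int error;

		if (lazy_dev)		/* already tried, success or failure */
			return;

		d = input_allocate_device();
		if (!d) {
			lazy_dev = ERR_PTR(-ENOMEM);	/* remember the failure */
			return;
		}

		d->name = "example device";	/* hypothetical name */
		input_set_capability(d, EV_KEY, BTN_LEFT);

		error = input_register_device(d);
		if (error) {
			input_free_device(d);
			lazy_dev = ERR_PTR(error);
			return;
		}

		lazy_dev = d;			/* ready for reporting */
	}

	static void lazy_report(void)
	{
		if (!IS_ERR_OR_NULL(lazy_dev)) {	/* skips NULL and ERR_PTR() */
			input_report_key(lazy_dev, BTN_LEFT, 1);
			input_sync(lazy_dev);
		}
	}
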
@@ -1275,7 +1328,7 @@
 			    psmouse->pktcnt - 1,
 			    psmouse->packet[psmouse->pktcnt - 1]);
 
-		if (priv->proto_version == ALPS_PROTO_V3 &&
+		if (priv->proto_version == ALPS_PROTO_V3_RUSHMORE &&
 		    psmouse->pktcnt == psmouse->pktsize) {
 			/*
 			 * Some Dell boxes, such as Latitude E6440 or E7440
@@ -1780,7 +1833,7 @@
 	 * all.
 	 */
 	if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) {
-		psmouse_warn(psmouse, "trackstick E7 report failed\n");
+		psmouse_warn(psmouse, "Failed to initialize trackstick (E7 report failed)\n");
 		ret = -ENODEV;
 	} else {
 		psmouse_dbg(psmouse, "trackstick E7 report: %3ph\n", param);
@@ -1945,8 +1998,6 @@
 						   ALPS_REG_BASE_RUSHMORE);
 		if (reg_val == -EIO)
 			goto error;
-		if (reg_val == -ENODEV)
-			priv->flags &= ~ALPS_DUALPOINT;
 	}
 
 	if (alps_enter_command_mode(psmouse) ||
@@ -2162,11 +2213,18 @@
 	return ret;
 }
 
-static void alps_set_defaults(struct alps_data *priv)
+static int alps_set_protocol(struct psmouse *psmouse,
+			     struct alps_data *priv,
+			     const struct alps_protocol_info *protocol)
 {
-	priv->byte0 = 0x8f;
-	priv->mask0 = 0x8f;
-	priv->flags = ALPS_DUALPOINT;
+	psmouse->private = priv;
+
+	setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
+
+	priv->proto_version = protocol->version;
+	priv->byte0 = protocol->byte0;
+	priv->mask0 = protocol->mask0;
+	priv->flags = protocol->flags;
 
 	priv->x_max = 2000;
 	priv->y_max = 1400;
@@ -2182,6 +2240,7 @@
 		priv->x_max = 1023;
 		priv->y_max = 767;
 		break;
+
 	case ALPS_PROTO_V3:
 		priv->hw_init = alps_hw_init_v3;
 		priv->process_packet = alps_process_packet_v3;
@@ -2190,6 +2249,23 @@
 		priv->nibble_commands = alps_v3_nibble_commands;
 		priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
 		break;
+
+	case ALPS_PROTO_V3_RUSHMORE:
+		priv->hw_init = alps_hw_init_rushmore_v3;
+		priv->process_packet = alps_process_packet_v3;
+		priv->set_abs_params = alps_set_abs_params_mt;
+		priv->decode_fields = alps_decode_rushmore;
+		priv->nibble_commands = alps_v3_nibble_commands;
+		priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+		priv->x_bits = 16;
+		priv->y_bits = 12;
+
+		if (alps_probe_trackstick_v3(psmouse,
+					     ALPS_REG_BASE_RUSHMORE) < 0)
+			priv->flags &= ~ALPS_DUALPOINT;
+
+		break;
+
 	case ALPS_PROTO_V4:
 		priv->hw_init = alps_hw_init_v4;
 		priv->process_packet = alps_process_packet_v4;
@@ -2197,6 +2273,7 @@
 		priv->nibble_commands = alps_v4_nibble_commands;
 		priv->addr_command = PSMOUSE_CMD_DISABLE;
 		break;
+
 	case ALPS_PROTO_V5:
 		priv->hw_init = alps_hw_init_dolphin_v1;
 		priv->process_packet = alps_process_touchpad_packet_v3_v5;
@@ -2204,14 +2281,12 @@
 		priv->set_abs_params = alps_set_abs_params_mt;
 		priv->nibble_commands = alps_v3_nibble_commands;
 		priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-		priv->byte0 = 0xc8;
-		priv->mask0 = 0xd8;
-		priv->flags = 0;
 		priv->x_max = 1360;
 		priv->y_max = 660;
 		priv->x_bits = 23;
 		priv->y_bits = 12;
 		break;
+
 	case ALPS_PROTO_V6:
 		priv->hw_init = alps_hw_init_v6;
 		priv->process_packet = alps_process_packet_v6;
@@ -2220,6 +2295,7 @@
 		priv->x_max = 2047;
 		priv->y_max = 1535;
 		break;
+
 	case ALPS_PROTO_V7:
 		priv->hw_init = alps_hw_init_v7;
 		priv->process_packet = alps_process_packet_v7;
@@ -2227,19 +2303,21 @@
 		priv->set_abs_params = alps_set_abs_params_mt;
 		priv->nibble_commands = alps_v3_nibble_commands;
 		priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-		priv->x_max = 0xfff;
-		priv->y_max = 0x7ff;
-		priv->byte0 = 0x48;
-		priv->mask0 = 0x48;
+
+		if (alps_dolphin_get_device_area(psmouse, priv))
+			return -EIO;
 
 		if (priv->fw_ver[1] != 0xba)
 			priv->flags |= ALPS_BUTTONPAD;
+
 		break;
 	}
+
+	return 0;
 }
 
-static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
-			    unsigned char *e7, unsigned char *ec)
+static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
+							 unsigned char *ec)
 {
 	const struct alps_model_info *model;
 	int i;
@@ -2251,23 +2329,18 @@
 		    (!model->command_mode_resp ||
 		     model->command_mode_resp == ec[2])) {
 
-			priv->proto_version = model->proto_version;
-			alps_set_defaults(priv);
-
-			priv->flags = model->flags;
-			priv->byte0 = model->byte0;
-			priv->mask0 = model->mask0;
-
-			return 0;
+			return &model->protocol_info;
 		}
 	}
 
-	return -EINVAL;
+	return NULL;
 }
 
 static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
 {
+	const struct alps_protocol_info *protocol;
 	unsigned char e6[4], e7[4], ec[4];
+	int error;
 
 	/*
 	 * First try "E6 report".
@@ -2293,54 +2366,35 @@
 	    alps_exit_command_mode(psmouse))
 		return -EIO;
 
-	/* Save the Firmware version */
-	memcpy(priv->fw_ver, ec, 3);
-
-	if (alps_match_table(psmouse, priv, e7, ec) == 0) {
-		return 0;
-	} else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
-		   ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
-		priv->proto_version = ALPS_PROTO_V5;
-		alps_set_defaults(priv);
-		if (alps_dolphin_get_device_area(psmouse, priv))
-			return -EIO;
-		else
-			return 0;
-	} else if (ec[0] == 0x88 &&
-		   ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
-		priv->proto_version = ALPS_PROTO_V7;
-		alps_set_defaults(priv);
-
-		return 0;
-	} else if (ec[0] == 0x88 && ec[1] == 0x08) {
-		priv->proto_version = ALPS_PROTO_V3;
-		alps_set_defaults(priv);
-
-		priv->hw_init = alps_hw_init_rushmore_v3;
-		priv->decode_fields = alps_decode_rushmore;
-		priv->x_bits = 16;
-		priv->y_bits = 12;
-		priv->flags |= ALPS_IS_RUSHMORE;
-
-		/* hack to make addr_command, nibble_command available */
-		psmouse->private = priv;
-
-		if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE))
-			priv->flags &= ~ALPS_DUALPOINT;
-
-		return 0;
-	} else if (ec[0] == 0x88 && ec[1] == 0x07 &&
-		   ec[2] >= 0x90 && ec[2] <= 0x9d) {
-		priv->proto_version = ALPS_PROTO_V3;
-		alps_set_defaults(priv);
-
-		return 0;
+	protocol = alps_match_table(e7, ec);
+	if (!protocol) {
+		if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
+			   ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
+			protocol = &alps_v5_protocol_data;
+		} else if (ec[0] == 0x88 &&
+			   ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
+			protocol = &alps_v7_protocol_data;
+		} else if (ec[0] == 0x88 && ec[1] == 0x08) {
+			protocol = &alps_v3_rushmore_data;
+		} else if (ec[0] == 0x88 && ec[1] == 0x07 &&
+			   ec[2] >= 0x90 && ec[2] <= 0x9d) {
+			protocol = &alps_v3_protocol_data;
+		} else {
+			psmouse_dbg(psmouse,
+				    "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
+			return -EINVAL;
+		}
 	}
 
-	psmouse_dbg(psmouse,
-		    "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
+	if (priv) {
+		/* Save the Firmware version */
+		memcpy(priv->fw_ver, ec, 3);
+		error = alps_set_protocol(psmouse, priv, protocol);
+		if (error)
+			return error;
+	}
 
-	return -EINVAL;
+	return 0;
 }
 
 static int alps_reconnect(struct psmouse *psmouse)
@@ -2361,7 +2415,10 @@
 
 	psmouse_reset(psmouse);
 	del_timer_sync(&priv->timer);
-	input_unregister_device(priv->dev2);
+	if (priv->dev2)
+		input_unregister_device(priv->dev2);
+	if (!IS_ERR_OR_NULL(priv->dev3))
+		input_unregister_device(priv->dev3);
 	kfree(priv);
 }
 
@@ -2394,25 +2451,12 @@
 
 int alps_init(struct psmouse *psmouse)
 {
-	struct alps_data *priv;
-	struct input_dev *dev1 = psmouse->dev, *dev2;
+	struct alps_data *priv = psmouse->private;
+	struct input_dev *dev1 = psmouse->dev;
+	int error;
 
-	priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
-	dev2 = input_allocate_device();
-	if (!priv || !dev2)
-		goto init_fail;
-
-	priv->dev2 = dev2;
-	setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
-
-	psmouse->private = priv;
-
-	psmouse_reset(psmouse);
-
-	if (alps_identify(psmouse, priv) < 0)
-		goto init_fail;
-
-	if (priv->hw_init(psmouse))
+	error = priv->hw_init(psmouse);
+	if (error)
 		goto init_fail;
 
 	/*
@@ -2462,36 +2506,57 @@
 	}
 
 	if (priv->flags & ALPS_DUALPOINT) {
+		struct input_dev *dev2;
+
+		dev2 = input_allocate_device();
+		if (!dev2) {
+			psmouse_err(psmouse,
+				    "failed to allocate trackstick device\n");
+			error = -ENOMEM;
+			goto init_fail;
+		}
+
+		snprintf(priv->phys2, sizeof(priv->phys2), "%s/input1",
+			 psmouse->ps2dev.serio->phys);
+		dev2->phys = priv->phys2;
+
 		/*
 		 * format of input device name is: "protocol vendor name"
 		 * see function psmouse_switch_protocol() in psmouse-base.c
 		 */
 		dev2->name = "AlpsPS/2 ALPS DualPoint Stick";
+
+		dev2->id.bustype = BUS_I8042;
+		dev2->id.vendor  = 0x0002;
 		dev2->id.product = PSMOUSE_ALPS;
 		dev2->id.version = priv->proto_version;
-	} else {
-		dev2->name = "PS/2 ALPS Mouse";
-		dev2->id.product = PSMOUSE_PS2;
-		dev2->id.version = 0x0000;
-	}
+		dev2->dev.parent = &psmouse->ps2dev.serio->dev;
 
-	snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
-	dev2->phys = priv->phys;
-	dev2->id.bustype = BUS_I8042;
-	dev2->id.vendor  = 0x0002;
-	dev2->dev.parent = &psmouse->ps2dev.serio->dev;
+		input_set_capability(dev2, EV_REL, REL_X);
+		input_set_capability(dev2, EV_REL, REL_Y);
+		input_set_capability(dev2, EV_KEY, BTN_LEFT);
+		input_set_capability(dev2, EV_KEY, BTN_RIGHT);
+		input_set_capability(dev2, EV_KEY, BTN_MIDDLE);
 
-	dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
-	dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
-	dev2->keybit[BIT_WORD(BTN_LEFT)] =
-		BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
-
-	__set_bit(INPUT_PROP_POINTER, dev2->propbit);
-	if (priv->flags & ALPS_DUALPOINT)
+		__set_bit(INPUT_PROP_POINTER, dev2->propbit);
 		__set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
 
-	if (input_register_device(priv->dev2))
-		goto init_fail;
+		error = input_register_device(dev2);
+		if (error) {
+			psmouse_err(psmouse,
+				    "failed to register trackstick device: %d\n",
+				    error);
+			input_free_device(dev2);
+			goto init_fail;
+		}
+
+		priv->dev2 = dev2;
+	}
+
+	priv->psmouse = psmouse;
+
+	INIT_DELAYED_WORK(&priv->dev3_register_work,
+			  alps_register_bare_ps2_mouse);
 
 	psmouse->protocol_handler = alps_process_byte;
 	psmouse->poll = alps_poll;
@@ -2509,25 +2574,58 @@
 
 init_fail:
 	psmouse_reset(psmouse);
-	input_free_device(dev2);
-	kfree(priv);
+	/*
+	 * Even though we did not allocate psmouse->private, we do free
+	 * it here.
+	 */
+	kfree(psmouse->private);
 	psmouse->private = NULL;
-	return -1;
+	return error;
 }
 
 int alps_detect(struct psmouse *psmouse, bool set_properties)
 {
-	struct alps_data dummy;
+	struct alps_data *priv;
+	int error;
 
-	if (alps_identify(psmouse, &dummy) < 0)
-		return -1;
+	error = alps_identify(psmouse, NULL);
+	if (error)
+		return error;
+
+	/*
+	 * Reset the device to make sure it is fully operational:
+	 * on some laptops, like certain Dell Latitudes, we may
+	 * fail to properly detect presence of trackstick if device
+	 * has not been reset.
+	 */
+	psmouse_reset(psmouse);
+
+	priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	error = alps_identify(psmouse, priv);
+	if (error) {
+		kfree(priv);
+		return error;
+	}
 
 	if (set_properties) {
 		psmouse->vendor = "ALPS";
-		psmouse->name = dummy.flags & ALPS_DUALPOINT ?
+		psmouse->name = priv->flags & ALPS_DUALPOINT ?
 				"DualPoint TouchPad" : "GlidePoint";
-		psmouse->model = dummy.proto_version << 8;
+		psmouse->model = priv->proto_version;
+	} else {
+		/*
+		 * Destroy alps_data structure we allocated earlier since
+		 * this was just a "trial run". Otherwise we'll keep it
+		 * to be used by alps_init() which has to be called if
+		 * we succeed and set_properties is true.
+		 */
+		kfree(priv);
+		psmouse->private = NULL;
 	}
+
 	return 0;
 }
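
After this rework alps_detect() does the heavy lifting (reset, E6/E7/EC
identification, allocation of struct alps_data) and leaves the structure in
psmouse->private when set_properties is true, so the later alps_init() call
only has to run hw_init() and register the extra input devices. A rough
sketch of the intended calling sequence (hypothetical caller shown only for
illustration; the real sequencing lives in psmouse-base.c):

	#include "psmouse.h"
	#include "alps.h"

	/* hypothetical probe helper, error paths trimmed */
	static int alps_probe_example(struct psmouse *psmouse)
	{
		int error;

		/*
		 * Resets the device, identifies the protocol and stashes the
		 * freshly allocated struct alps_data in psmouse->private.
		 */
		error = alps_detect(psmouse, true);
		if (error)
			return error;

		/* Consumes psmouse->private left behind by alps_detect(). */
		return alps_init(psmouse);
	}
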
 
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 66240b4..02513c0 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -14,13 +14,14 @@
 
 #include <linux/input/mt.h>
 
-#define ALPS_PROTO_V1	1
-#define ALPS_PROTO_V2	2
-#define ALPS_PROTO_V3	3
-#define ALPS_PROTO_V4	4
-#define ALPS_PROTO_V5	5
-#define ALPS_PROTO_V6	6
-#define ALPS_PROTO_V7	7	/* t3btl t4s */
+#define ALPS_PROTO_V1		0x100
+#define ALPS_PROTO_V2		0x200
+#define ALPS_PROTO_V3		0x300
+#define ALPS_PROTO_V3_RUSHMORE	0x310
+#define ALPS_PROTO_V4		0x400
+#define ALPS_PROTO_V5		0x500
+#define ALPS_PROTO_V6		0x600
+#define ALPS_PROTO_V7		0x700	/* t3btl t4s */
 
 #define MAX_TOUCHES	2
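
The renumbered constants keep the protocol generation in the upper byte, so a
sub-variant such as ALPS_PROTO_V3_RUSHMORE (0x310) stays grouped with the V3
family while remaining distinct, and the full 16-bit value can be exported
as-is through psmouse->model. The driver itself compares exact values, but
the encoding would also support a generation check along these lines (purely
illustrative helper, not part of the patch):

	/* Hypothetical helper: the protocol generation sits in bits 8..15. */
	#define ALPS_PROTO_MAJOR(v)	((v) >> 8)

	/*
	 * ALPS_PROTO_MAJOR(ALPS_PROTO_V3)          == 3
	 * ALPS_PROTO_MAJOR(ALPS_PROTO_V3_RUSHMORE) == 3
	 * ALPS_PROTO_MAJOR(ALPS_PROTO_V7)          == 7
	 */
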
 
@@ -46,29 +47,37 @@
 };
 
 /**
+ * struct alps_protocol_info - information about protocol used by a device
+ * @version: Indicates V1/V2/V3/...
+ * @byte0: Helps figure out whether a position report packet matches the
+ *   known format for this model.  The first byte of the report, ANDed with
+ *   mask0, should match byte0.
+ * @mask0: The mask used to check the first byte of the report.
+ * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ */
+struct alps_protocol_info {
+	u16 version;
+	u8 byte0, mask0;
+	unsigned int flags;
+};
+
+/**
  * struct alps_model_info - touchpad ID table
  * @signature: E7 response string to match.
  * @command_mode_resp: For V3/V4 touchpads, the final byte of the EC response
  *   (aka command mode response) identifies the firmware minor version.  This
  *   can be used to distinguish different hardware models which are not
  *   uniquely identifiable through their E7 responses.
- * @proto_version: Indicates V1/V2/V3/...
- * @byte0: Helps figure out whether a position report packet matches the
- *   known format for this model.  The first byte of the report, ANDed with
- *   mask0, should match byte0.
- * @mask0: The mask used to check the first byte of the report.
- * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ * @protocol_info: information about the protocol used by the device.
  *
  * Many (but not all) ALPS touchpads can be identified by looking at the
  * values returned in the "E7 report" and/or the "EC report."  This table
  * lists a number of such touchpads.
  */
 struct alps_model_info {
-	unsigned char signature[3];
-	unsigned char command_mode_resp;
-	unsigned char proto_version;
-	unsigned char byte0, mask0;
-	int flags;
+	u8 signature[3];
+	u8 command_mode_resp;
+	struct alps_protocol_info protocol_info;
 };
 
 /**
@@ -132,8 +141,12 @@
 
 /**
  * struct alps_data - private data structure for the ALPS driver
- * @dev2: "Relative" device used to report trackstick or mouse activity.
- * @phys: Physical path for the relative device.
+ * @psmouse: Pointer to parent psmouse device.
+ * @dev2: Trackstick device (can be NULL).
+ * @dev3: Generic PS/2 mouse (can be NULL, registered lazily).
+ * @phys2: Physical path for the trackstick device.
+ * @phys3: Physical path for the generic PS/2 mouse.
+ * @dev3_register_work: Delayed work for registering PS/2 mouse.
  * @nibble_commands: Command mapping used for touchpad register accesses.
  * @addr_command: Command used to tell the touchpad that a register address
  *   follows.
@@ -160,15 +173,19 @@
  * @timer: Timer for flushing out the final report packet in the stream.
  */
 struct alps_data {
+	struct psmouse *psmouse;
 	struct input_dev *dev2;
-	char phys[32];
+	struct input_dev *dev3;
+	char phys2[32];
+	char phys3[32];
+	struct delayed_work dev3_register_work;
 
 	/* these are autodetected when the device is identified */
 	const struct alps_nibble_commands *nibble_commands;
 	int addr_command;
-	unsigned char proto_version;
-	unsigned char byte0, mask0;
-	unsigned char fw_ver[3];
+	u16 proto_version;
+	u8 byte0, mask0;
+	u8 fw_ver[3];
 	int flags;
 	int x_max;
 	int y_max;
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 77e9d70..1e2291c 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -20,7 +20,7 @@
 #include <linux/input/mt.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 #include "cyapa.h"
 
 
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index ddf5393..5b611dd 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -17,7 +17,7 @@
 #include <linux/mutex.h>
 #include <linux/completion.h>
 #include <linux/slab.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 #include <linux/crc-itu-t.h>
 #include "cyapa.h"
 
@@ -1926,7 +1926,7 @@
 				electrodes_tx = cyapa->electrodes_x;
 			max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) &
 						~7u) * electrodes_tx;
-		} else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) {
+		} else {
 			offset = 2;
 			max_element_cnt = cyapa->electrodes_x +
 						cyapa->electrodes_y;
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 9118a18..28dcfc8 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -710,8 +710,3 @@
 
 	return -1;
 }
-
-bool cypress_supported(void)
-{
-	return true;
-}
diff --git a/drivers/input/mouse/cypress_ps2.h b/drivers/input/mouse/cypress_ps2.h
index 4720f21..81f68aa 100644
--- a/drivers/input/mouse/cypress_ps2.h
+++ b/drivers/input/mouse/cypress_ps2.h
@@ -172,7 +172,6 @@
 #ifdef CONFIG_MOUSE_PS2_CYPRESS
 int cypress_detect(struct psmouse *psmouse, bool set_properties);
 int cypress_init(struct psmouse *psmouse);
-bool cypress_supported(void);
 #else
 inline int cypress_detect(struct psmouse *psmouse, bool set_properties)
 {
@@ -182,10 +181,6 @@
 {
 	return -ENOSYS;
 }
-inline bool cypress_supported(void)
-{
-	return 0;
-}
 #endif /* CONFIG_MOUSE_PS2_CYPRESS */
 
 #endif  /* _CYPRESS_PS2_H */
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index fca38ba..23d2594 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -67,9 +67,6 @@
 
 #define FOC_MAX_FINGERS 5
 
-#define FOC_MAX_X 2431
-#define FOC_MAX_Y 1663
-
 /*
  * Current state of a single finger on the touchpad.
  */
@@ -129,9 +126,17 @@
 		input_mt_slot(dev, i);
 		input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
 		if (active) {
-			input_report_abs(dev, ABS_MT_POSITION_X, finger->x);
+			unsigned int clamped_x, clamped_y;
+			/*
+			 * The touchpad might report invalid data, so we clamp
+			 * the resulting values so that we do not confuse
+			 * userspace.
+			 */
+			clamped_x = clamp(finger->x, 0U, priv->x_max);
+			clamped_y = clamp(finger->y, 0U, priv->y_max);
+			input_report_abs(dev, ABS_MT_POSITION_X, clamped_x);
 			input_report_abs(dev, ABS_MT_POSITION_Y,
-					 FOC_MAX_Y - finger->y);
+					 priv->y_max - clamped_y);
 		}
 	}
 	input_mt_report_pointer_emulation(dev, true);
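
clamp() goes through the kernel's type-checked min()/max() machinery, which
is presumably why the lower bound is spelled 0U here: finger->x/y and the
queried priv->x_max/y_max are unsigned int, and a plain signed 0 would trip
the type-comparison warning. A user-space approximation of the clamp and
Y-inversion step (illustration only, not the kernel macro):

	/* Clamp a raw coordinate into [0, max], as clamp(val, 0U, max) does. */
	static unsigned int clamp_coord(unsigned int val, unsigned int max)
	{
		return val > max ? max : val;
	}

	/*
	 * The reported Y is then inverted against the queried maximum:
	 *   reported_y = y_max - clamp_coord(raw_y, y_max);
	 */
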
@@ -180,16 +185,6 @@
 
 	state->pressed = (packet[0] >> 4) & 1;
 
-	/*
-	 * packet[5] contains some kind of tool size in the most
-	 * significant nibble. 0xff is a special value (latching) that
-	 * signals a large contact area.
-	 */
-	if (packet[5] == 0xff) {
-		state->fingers[finger].valid = false;
-		return;
-	}
-
 	state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2];
 	state->fingers[finger].y = (packet[3] << 8) | packet[4];
 	state->fingers[finger].valid = true;
@@ -381,6 +376,23 @@
 
 	return 0;
 }
+
+void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution)
+{
+	/* not supported yet */
+}
+
+static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate)
+{
+	/* not supported yet */
+}
+
+static void focaltech_set_scale(struct psmouse *psmouse,
+				enum psmouse_scale scale)
+{
+	/* not supported yet */
+}
+
 int focaltech_init(struct psmouse *psmouse)
 {
 	struct focaltech_data *priv;
@@ -415,6 +427,14 @@
 	psmouse->cleanup = focaltech_reset;
 	/* resync is not supported yet */
 	psmouse->resync_time = 0;
+	/*
+	 * rate/resolution/scale changes are not supported yet, and
+	 * the generic implementations of these functions seem to
+	 * confuse some touchpads
+	 */
+	psmouse->set_resolution = focaltech_set_resolution;
+	psmouse->set_rate = focaltech_set_rate;
+	psmouse->set_scale = focaltech_set_scale;
 
 	return 0;
 
@@ -424,11 +444,6 @@
 	return error;
 }
 
-bool focaltech_supported(void)
-{
-	return true;
-}
-
 #else /* CONFIG_MOUSE_PS2_FOCALTECH */
 
 int focaltech_init(struct psmouse *psmouse)
@@ -438,9 +453,4 @@
 	return 0;
 }
 
-bool focaltech_supported(void)
-{
-	return false;
-}
-
 #endif /* CONFIG_MOUSE_PS2_FOCALTECH */
diff --git a/drivers/input/mouse/focaltech.h b/drivers/input/mouse/focaltech.h
index 71870a9..ca61ebf 100644
--- a/drivers/input/mouse/focaltech.h
+++ b/drivers/input/mouse/focaltech.h
@@ -19,6 +19,5 @@
 
 int focaltech_detect(struct psmouse *psmouse, bool set_properties);
 int focaltech_init(struct psmouse *psmouse);
-bool focaltech_supported(void);
 
 #endif
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 68469fed..8bc6123 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -454,6 +454,17 @@
 }
 
 /*
+ * Here we set the mouse scaling.
+ */
+
+static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale)
+{
+	ps2_command(&psmouse->ps2dev, NULL,
+		    scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 :
+					       PSMOUSE_CMD_SETSCALE11);
+}
+
+/*
  * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
  */
 
@@ -689,6 +700,7 @@
 
 	psmouse->set_rate = psmouse_set_rate;
 	psmouse->set_resolution = psmouse_set_resolution;
+	psmouse->set_scale = psmouse_set_scale;
 	psmouse->poll = psmouse_poll;
 	psmouse->protocol_handler = psmouse_process_byte;
 	psmouse->pktsize = 3;
@@ -727,7 +739,7 @@
 	if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) {
 		if (max_proto > PSMOUSE_IMEX) {
 			if (!set_properties || focaltech_init(psmouse) == 0) {
-				if (focaltech_supported())
+				if (IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH))
 					return PSMOUSE_FOCALTECH;
 				/*
 				 * Note that we need to also restrict
@@ -776,7 +788,7 @@
  * Try activating protocol, but check if support is enabled first, since
  * we try detecting Synaptics even when protocol is disabled.
  */
-			if (synaptics_supported() &&
+			if (IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS) &&
 			    (!set_properties || synaptics_init(psmouse) == 0)) {
 				return PSMOUSE_SYNAPTICS;
 			}
@@ -801,7 +813,7 @@
  */
 	if (max_proto > PSMOUSE_IMEX &&
 			cypress_detect(psmouse, set_properties) == 0) {
-		if (cypress_supported()) {
+		if (IS_ENABLED(CONFIG_MOUSE_PS2_CYPRESS)) {
 			if (cypress_init(psmouse) == 0)
 				return PSMOUSE_CYPRESS;
 
@@ -1160,7 +1172,7 @@
 	if (psmouse_max_proto != PSMOUSE_PS2) {
 		psmouse->set_rate(psmouse, psmouse->rate);
 		psmouse->set_resolution(psmouse, psmouse->resolution);
-		ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
+		psmouse->set_scale(psmouse, PSMOUSE_SCALE11);
 	}
 }
 
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index c2ff137..d02e1bd 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -36,6 +36,11 @@
 	PSMOUSE_FULL_PACKET
 } psmouse_ret_t;
 
+enum psmouse_scale {
+	PSMOUSE_SCALE11,
+	PSMOUSE_SCALE21
+};
+
 struct psmouse {
 	void *private;
 	struct input_dev *dev;
@@ -67,6 +72,7 @@
 	psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse);
 	void (*set_rate)(struct psmouse *psmouse, unsigned int rate);
 	void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution);
+	void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale);
 
 	int (*reconnect)(struct psmouse *psmouse);
 	void (*disconnect)(struct psmouse *psmouse);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 7e705ee..dda6058 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -67,9 +67,6 @@
 #define X_MAX_POSITIVE 8176
 #define Y_MAX_POSITIVE 8176
 
-/* maximum ABS_MT_POSITION displacement (in mm) */
-#define DMAX 10
-
 /*****************************************************************************
  *	Stuff we need even when we do not want native Synaptics support
  ****************************************************************************/
@@ -123,32 +120,41 @@
 
 static bool cr48_profile_sensor;
 
+#define ANY_BOARD_ID 0
 struct min_max_quirk {
 	const char * const *pnp_ids;
+	struct {
+		unsigned long int min, max;
+	} board_id;
 	int x_min, x_max, y_min, y_max;
 };
 
 static const struct min_max_quirk min_max_pnpid_table[] = {
 	{
 		(const char * const []){"LEN0033", NULL},
+		{ANY_BOARD_ID, ANY_BOARD_ID},
 		1024, 5052, 2258, 4832
 	},
 	{
-		(const char * const []){"LEN0035", "LEN0042", NULL},
+		(const char * const []){"LEN0042", NULL},
+		{ANY_BOARD_ID, ANY_BOARD_ID},
 		1232, 5710, 1156, 4696
 	},
 	{
 		(const char * const []){"LEN0034", "LEN0036", "LEN0037",
 					"LEN0039", "LEN2002", "LEN2004",
 					NULL},
+		{ANY_BOARD_ID, 2961},
 		1024, 5112, 2024, 4832
 	},
 	{
 		(const char * const []){"LEN2001", NULL},
+		{ANY_BOARD_ID, ANY_BOARD_ID},
 		1024, 5022, 2508, 4832
 	},
 	{
 		(const char * const []){"LEN2006", NULL},
+		{ANY_BOARD_ID, ANY_BOARD_ID},
 		1264, 5675, 1171, 4688
 	},
 	{ }
@@ -175,9 +181,7 @@
 	"LEN0041",
 	"LEN0042", /* Yoga */
 	"LEN0045",
-	"LEN0046",
 	"LEN0047",
-	"LEN0048",
 	"LEN0049",
 	"LEN2000",
 	"LEN2001", /* Edge E431 */
@@ -235,18 +239,39 @@
 	return 0;
 }
 
+static int synaptics_more_extended_queries(struct psmouse *psmouse)
+{
+	struct synaptics_data *priv = psmouse->private;
+	unsigned char buf[3];
+
+	if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf))
+		return -1;
+
+	priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2];
+
+	return 0;
+}
+
 /*
- * Read the board id from the touchpad
+ * Read the board id and the "More Extended Queries" from the touchpad
  * The board id is encoded in the "QUERY MODES" response
  */
-static int synaptics_board_id(struct psmouse *psmouse)
+static int synaptics_query_modes(struct psmouse *psmouse)
 {
 	struct synaptics_data *priv = psmouse->private;
 	unsigned char bid[3];
 
+	/* firmwares prior to 7.5 have no board_id encoded */
+	if (SYN_ID_FULL(priv->identity) < 0x705)
+		return 0;
+
 	if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid))
 		return -1;
 	priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1];
+
+	if (SYN_MEXT_CAP_BIT(bid[0]))
+		return synaptics_more_extended_queries(psmouse);
+
 	return 0;
 }
 
@@ -346,7 +371,6 @@
 {
 	struct synaptics_data *priv = psmouse->private;
 	unsigned char resp[3];
-	int i;
 
 	if (SYN_ID_MAJOR(priv->identity) < 4)
 		return 0;
@@ -358,17 +382,6 @@
 		}
 	}
 
-	for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
-		if (psmouse_matches_pnp_id(psmouse,
-					   min_max_pnpid_table[i].pnp_ids)) {
-			priv->x_min = min_max_pnpid_table[i].x_min;
-			priv->x_max = min_max_pnpid_table[i].x_max;
-			priv->y_min = min_max_pnpid_table[i].y_min;
-			priv->y_max = min_max_pnpid_table[i].y_max;
-			return 0;
-		}
-	}
-
 	if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
 	    SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
 		if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
@@ -377,23 +390,69 @@
 		} else {
 			priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
 			priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
+			psmouse_info(psmouse,
+				     "queried max coordinates: x [..%d], y [..%d]\n",
+				     priv->x_max, priv->y_max);
 		}
 	}
 
-	if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 &&
-	    SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) {
+	if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) &&
+	    (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 ||
+	      * Firmware v8.1 does not report the proper number of extended
+	      * Firmware v8.1 does not report proper number of extended
+	      * capabilities, but has been proven to report correct min
+	      * coordinates.
+	      */
+	     SYN_ID_FULL(priv->identity) == 0x801)) {
 		if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
 			psmouse_warn(psmouse,
 				     "device claims to have min coordinates query, but I'm not able to read it.\n");
 		} else {
 			priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
 			priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
+			psmouse_info(psmouse,
+				     "queried min coordinates: x [%d..], y [%d..]\n",
+				     priv->x_min, priv->y_min);
 		}
 	}
 
 	return 0;
 }
 
+/*
+ * Apply quirk(s) if the hardware matches
+ */
+
+static void synaptics_apply_quirks(struct psmouse *psmouse)
+{
+	struct synaptics_data *priv = psmouse->private;
+	int i;
+
+	for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
+		if (!psmouse_matches_pnp_id(psmouse,
+					    min_max_pnpid_table[i].pnp_ids))
+			continue;
+
+		if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID &&
+		    priv->board_id < min_max_pnpid_table[i].board_id.min)
+			continue;
+
+		if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID &&
+		    priv->board_id > min_max_pnpid_table[i].board_id.max)
+			continue;
+
+		priv->x_min = min_max_pnpid_table[i].x_min;
+		priv->x_max = min_max_pnpid_table[i].x_max;
+		priv->y_min = min_max_pnpid_table[i].y_min;
+		priv->y_max = min_max_pnpid_table[i].y_max;
+		psmouse_info(psmouse,
+			     "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n",
+			     priv->x_min, priv->x_max,
+			     priv->y_min, priv->y_max);
+		break;
+	}
+}
+
 static int synaptics_query_hardware(struct psmouse *psmouse)
 {
 	if (synaptics_identify(psmouse))
@@ -402,13 +461,15 @@
 		return -1;
 	if (synaptics_firmware_id(psmouse))
 		return -1;
-	if (synaptics_board_id(psmouse))
+	if (synaptics_query_modes(psmouse))
 		return -1;
 	if (synaptics_capability(psmouse))
 		return -1;
 	if (synaptics_resolution(psmouse))
 		return -1;
 
+	synaptics_apply_quirks(psmouse);
+
 	return 0;
 }
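
With the board_id window in place a min/max quirk can be limited to
particular hardware revisions: ANY_BOARD_ID (0) leaves a bound open, while
the LEN0034 group above stops applying beyond board_id 2961. A hypothetical
new table entry restricted to boards 3000..3100 would look like this
(made-up PnP ID and coordinates, for illustration only):

	{
		(const char * const []){"LEN9999", NULL},	/* made-up PnP ID */
		{3000, 3100},					/* board_id window */
		1024, 5112, 2024, 4832
	},
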
 
@@ -516,18 +577,22 @@
 	return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
 }
 
-static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet)
+static void synaptics_pass_pt_packet(struct psmouse *psmouse,
+				     struct serio *ptport,
+				     unsigned char *packet)
 {
+	struct synaptics_data *priv = psmouse->private;
 	struct psmouse *child = serio_get_drvdata(ptport);
 
 	if (child && child->state == PSMOUSE_ACTIVATED) {
-		serio_interrupt(ptport, packet[1], 0);
+		serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0);
 		serio_interrupt(ptport, packet[4], 0);
 		serio_interrupt(ptport, packet[5], 0);
 		if (child->pktsize == 4)
 			serio_interrupt(ptport, packet[2], 0);
-	} else
+	} else {
 		serio_interrupt(ptport, packet[1], 0);
+	}
 }
 
 static void synaptics_pt_activate(struct psmouse *psmouse)
@@ -605,6 +670,18 @@
 	}
 }
 
+static void synaptics_parse_ext_buttons(const unsigned char buf[],
+					struct synaptics_data *priv,
+					struct synaptics_hw_state *hw)
+{
+	unsigned int ext_bits =
+		(SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
+	unsigned int ext_mask = GENMASK(ext_bits - 1, 0);
+
+	hw->ext_buttons = buf[4] & ext_mask;
+	hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits;
+}
+
 static bool is_forcepad;
 
 static int synaptics_parse_hw_state(const unsigned char buf[],
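
synaptics_parse_ext_buttons() replaces the old fall-through switch with plain
masking: for a pad advertising, say, four extended buttons, ext_bits is
(4 + 1) >> 1 = 2 and ext_mask is GENMASK(1, 0) = 0x3, so buf[4] supplies
ext_buttons bits 0..1 and buf[5] bits 2..3. A stand-alone worked example
mirroring that arithmetic (user space, with a simplified 32-bit stand-in for
the kernel's GENMASK()):

	#include <stdio.h>

	/* simplified 32-bit stand-in for the kernel's GENMASK() */
	#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

	int main(void)
	{
		unsigned int n_ext = 4;			/* SYN_CAP_MULTI_BUTTON_NO() */
		unsigned int ext_bits = (n_ext + 1) >> 1;	/* 2 */
		unsigned int ext_mask = GENMASK(ext_bits - 1, 0); /* 0x3 */
		unsigned char buf4 = 0x02, buf5 = 0x01;	/* sample packet bytes */
		unsigned int ext_buttons;

		ext_buttons  = buf4 & ext_mask;			/* bits 0..1 */
		ext_buttons |= (buf5 & ext_mask) << ext_bits;	/* bits 2..3 */
		printf("ext_buttons = 0x%x\n", ext_buttons);	/* prints 0x6 */
		return 0;
	}
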
@@ -691,28 +768,9 @@
 			hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
 		}
 
-		if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
+		if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 &&
 		    ((buf[0] ^ buf[3]) & 0x02)) {
-			switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
-			default:
-				/*
-				 * if nExtBtn is greater than 8 it should be
-				 * considered invalid and treated as 0
-				 */
-				break;
-			case 8:
-				hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0;
-				hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0;
-			case 6:
-				hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0;
-				hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0;
-			case 4:
-				hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0;
-				hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0;
-			case 2:
-				hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0;
-				hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0;
-			}
+			synaptics_parse_ext_buttons(buf, priv, hw);
 		}
 	} else {
 		hw->x = (((buf[1] & 0x1f) << 8) | buf[2]);
@@ -774,12 +832,54 @@
 	}
 }
 
+static void synaptics_report_ext_buttons(struct psmouse *psmouse,
+					 const struct synaptics_hw_state *hw)
+{
+	struct input_dev *dev = psmouse->dev;
+	struct synaptics_data *priv = psmouse->private;
+	int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
+	char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+	int i;
+
+	if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
+		return;
+
+	/* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
+	if (SYN_ID_FULL(priv->identity) == 0x801 &&
+	    !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
+		return;
+
+	if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) {
+		for (i = 0; i < ext_bits; i++) {
+			input_report_key(dev, BTN_0 + 2 * i,
+				hw->ext_buttons & (1 << i));
+			input_report_key(dev, BTN_1 + 2 * i,
+				hw->ext_buttons & (1 << (i + ext_bits)));
+		}
+		return;
+	}
+
+	/*
+	 * This generation of touchpads has the trackstick buttons
+	 * physically wired to the touchpad. Re-route them through
+	 * the pass-through interface.
+	 */
+	if (!priv->pt_port)
+		return;
+
+	/* The trackstick expects at most 3 buttons */
+	priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons)      |
+			   SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 |
+			   SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2;
+
+	synaptics_pass_pt_packet(psmouse, priv->pt_port, buf);
+}
+
 static void synaptics_report_buttons(struct psmouse *psmouse,
 				     const struct synaptics_hw_state *hw)
 {
 	struct input_dev *dev = psmouse->dev;
 	struct synaptics_data *priv = psmouse->private;
-	int i;
 
 	input_report_key(dev, BTN_LEFT, hw->left);
 	input_report_key(dev, BTN_RIGHT, hw->right);
@@ -792,8 +892,7 @@
 		input_report_key(dev, BTN_BACK, hw->down);
 	}
 
-	for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
-		input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i));
+	synaptics_report_ext_buttons(psmouse, hw);
 }
 
 static void synaptics_report_mt_data(struct psmouse *psmouse,
@@ -813,7 +912,7 @@
 		pos[i].y = synaptics_invert_y(hw[i]->y);
 	}
 
-	input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res);
+	input_mt_assign_slots(dev, slot, pos, nsemi, 0);
 
 	for (i = 0; i < nsemi; i++) {
 		input_mt_slot(dev, slot[i]);
@@ -1014,7 +1113,8 @@
 		if (SYN_CAP_PASS_THROUGH(priv->capabilities) &&
 		    synaptics_is_pt_packet(psmouse->packet)) {
 			if (priv->pt_port)
-				synaptics_pass_pt_packet(priv->pt_port, psmouse->packet);
+				synaptics_pass_pt_packet(psmouse, priv->pt_port,
+							 psmouse->packet);
 		} else
 			synaptics_process_packet(psmouse);
 
@@ -1116,8 +1216,9 @@
 		__set_bit(BTN_BACK, dev->keybit);
 	}
 
-	for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
-		__set_bit(BTN_0 + i, dev->keybit);
+	if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
+		for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
+			__set_bit(BTN_0 + i, dev->keybit);
 
 	__clear_bit(EV_REL, dev->evbit);
 	__clear_bit(REL_X, dev->relbit);
@@ -1125,7 +1226,8 @@
 
 	if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
 		__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
-		if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
+		if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
+		    !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
 			__set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
 		/* Clickpads report only left button */
 		__clear_bit(BTN_RIGHT, dev->keybit);
@@ -1454,11 +1556,6 @@
 	return __synaptics_init(psmouse, false);
 }
 
-bool synaptics_supported(void)
-{
-	return true;
-}
-
 #else /* CONFIG_MOUSE_PS2_SYNAPTICS */
 
 void __init synaptics_module_init(void)
@@ -1470,9 +1567,4 @@
 	return -ENOSYS;
 }
 
-bool synaptics_supported(void)
-{
-	return false;
-}
-
 #endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 6faf9bb..ee4bd0d1 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -22,6 +22,7 @@
 #define SYN_QUE_EXT_CAPAB_0C		0x0c
 #define SYN_QUE_EXT_MAX_COORDS		0x0d
 #define SYN_QUE_EXT_MIN_COORDS		0x0f
+#define SYN_QUE_MEXT_CAPAB_10		0x10
 
 /* synaptics modes */
 #define SYN_BIT_ABSOLUTE_MODE		(1 << 7)
@@ -53,6 +54,7 @@
 #define SYN_EXT_CAP_REQUESTS(c)		(((c) & 0x700000) >> 20)
 #define SYN_CAP_MULTI_BUTTON_NO(ec)	(((ec) & 0x00f000) >> 12)
 #define SYN_CAP_PRODUCT_ID(ec)		(((ec) & 0xff0000) >> 16)
+#define SYN_MEXT_CAP_BIT(m)		((m) & (1 << 1))
 
 /*
  * The following describes response for the 0x0c query.
@@ -89,6 +91,30 @@
 #define SYN_CAP_REDUCED_FILTERING(ex0c)	((ex0c) & 0x000400)
 #define SYN_CAP_IMAGE_SENSOR(ex0c)	((ex0c) & 0x000800)
 
+/*
+ * The following describes the response for the 0x10 query.
+ *
+ * byte	mask	name			meaning
+ * ----	----	-------			------------
+ * 1	0x01	ext buttons are stick	buttons exported in the extended
+ *					capability are actually meant to be used
+ *					by the trackstick (pass-through).
+ * 1	0x02	SecurePad		the touchpad is a SecurePad, so it
+ *					contains a built-in fingerprint reader.
+ * 1	0xe0	more ext count		how many more extended queries are
+ *					available after this one.
+ * 2	0xff	SecurePad width		the width of the SecurePad fingerprint
+ *					reader.
+ * 3	0xff	SecurePad height	the height of the SecurePad fingerprint
+ *					reader.
+ */
+#define SYN_CAP_EXT_BUTTONS_STICK(ex10)	((ex10) & 0x010000)
+#define SYN_CAP_SECUREPAD(ex10)		((ex10) & 0x020000)
+
+#define SYN_CAP_EXT_BUTTON_STICK_L(eb)	(!!((eb) & 0x01))
+#define SYN_CAP_EXT_BUTTON_STICK_M(eb)	(!!((eb) & 0x02))
+#define SYN_CAP_EXT_BUTTON_STICK_R(eb)	(!!((eb) & 0x04))
+
 /* synaptics modes query bits */
 #define SYN_MODE_ABSOLUTE(m)		((m) & (1 << 7))
 #define SYN_MODE_RATE(m)		((m) & (1 << 6))
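
synaptics_more_extended_queries() packs the three response bytes MSB first,
(buf[0] << 16) | (buf[1] << 8) | buf[2], into priv->ext_cap_10, which is why
a bit documented as 0x01 of byte 1 is tested as 0x010000 by the macros above.
A small decoding sketch with made-up response bytes:

	#include <linux/types.h>

	/* Uses the SYN_CAP_* macros defined above in synaptics.h. */
	static void securepad_decode_example(void)
	{
		/* illustrative response: stick buttons + a 0x40 x 0x28 SecurePad */
		unsigned char buf[3] = { 0x03, 0x40, 0x28 };
		unsigned long ex10 = (buf[0] << 16) | (buf[1] << 8) | buf[2];

		bool stick     = SYN_CAP_EXT_BUTTONS_STICK(ex10);	/* non-zero */
		bool securepad = SYN_CAP_SECUREPAD(ex10);		/* non-zero */
		unsigned int width  = (ex10 >> 8) & 0xff;		/* 0x40 */
		unsigned int height = ex10 & 0xff;			/* 0x28 */

		(void)stick; (void)securepad; (void)width; (void)height;
	}
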
@@ -143,6 +169,7 @@
 	unsigned long int capabilities;		/* Capabilities */
 	unsigned long int ext_cap;		/* Extended Capabilities */
 	unsigned long int ext_cap_0c;		/* Ext Caps from 0x0c query */
+	unsigned long int ext_cap_10;		/* Ext Caps from 0x10 query */
 	unsigned long int identity;		/* Identification */
 	unsigned int x_res, y_res;		/* X/Y resolution in units/mm */
 	unsigned int x_max, y_max;		/* Max coordinates (from FW) */
@@ -156,6 +183,7 @@
 	bool disable_gesture;			/* disable gestures */
 
 	struct serio *pt_port;			/* Pass-through serio port */
+	unsigned char pt_buttons;		/* Pass-through buttons */
 
 	/*
 	 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet
@@ -175,6 +203,5 @@
 int synaptics_init(struct psmouse *psmouse);
 int synaptics_init_relative(struct psmouse *psmouse);
 void synaptics_reset(struct psmouse *psmouse);
-bool synaptics_supported(void);
 
 #endif /* _SYNAPTICS_H */
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 5891752..6261fd6 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -943,6 +943,7 @@
 	tristate "Allwinner sun4i resistive touchscreen controller support"
 	depends on ARCH_SUNXI || COMPILE_TEST
 	depends on HWMON
+	depends on THERMAL || !THERMAL_OF
 	help
 	  This selects support for the resistive touchscreen controller
 	  found on Allwinner sunxi SoCs.
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index baa0d97..1ae4e54 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,6 +23,7 @@
 config IOMMU_IO_PGTABLE_LPAE
 	bool "ARMv7/v8 Long Descriptor Format"
 	select IOMMU_IO_PGTABLE
+	depends on ARM || ARM64 || COMPILE_TEST
 	help
 	  Enable support for the ARM long descriptor pagetable format.
 	  This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -63,6 +64,7 @@
 	bool "MSM IOMMU Support"
 	depends on ARM
 	depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
+	depends on BROKEN
 	select IOMMU_API
 	help
 	  Support for the IOMMUs found on certain Qualcomm SOCs.
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 7ce5273..dc14fec4 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1186,8 +1186,15 @@
 
 static int __init exynos_iommu_init(void)
 {
+	struct device_node *np;
 	int ret;
 
+	np = of_find_matching_node(NULL, sysmmu_of_match);
+	if (!np)
+		return 0;
+
+	of_node_put(np);
+
 	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
 				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
 	if (!lv2table_kmem_cache) {
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 5a500ed..b610a8d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -56,7 +56,8 @@
 	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
 	  * (d)->bits_per_level) + (d)->pg_shift)
 
-#define ARM_LPAE_PAGES_PER_PGD(d)	((d)->pgd_size >> (d)->pg_shift)
+#define ARM_LPAE_PAGES_PER_PGD(d)					\
+	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
 
 /*
  * Calculate the index at level l used to map virtual address a using the
@@ -66,7 +67,7 @@
 	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
 
 #define ARM_LPAE_LVL_IDX(a,l,d)						\
-	(((a) >> ARM_LPAE_LVL_SHIFT(l,d)) &				\
+	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
 	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
 
 /* Calculate the block/page mapping size at level l for pagetable in d. */
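
The old ARM_LPAE_PAGES_PER_PGD() shift rounds down to zero whenever the
concatenated top-level table is smaller than one granule, and that zero then
feeds ilog2() in ARM_LPAE_PGD_IDX(). Worked example under an assumed
configuration, a stage-2 table whose pgd holds only four 8-byte entries with
a 4 KiB granule: 32 >> 12 == 0, while DIV_ROUND_UP(32, 4096) == 1. The added
(u64) cast in ARM_LPAE_LVL_IDX() presumably keeps the shift well defined when
unsigned long is only 32 bits wide.

	#include <linux/kernel.h>	/* DIV_ROUND_UP() */

	/* assumed: 4-entry pgd of 8-byte entries, 4 KiB granule (pg_shift 12) */
	static unsigned int pgd_pages_example(void)
	{
		size_t pgd_size = 4 * sizeof(u64);		/* 32 bytes */
		unsigned int pg_shift = 12;

		/* old macro: pgd_size >> pg_shift == 0	*/
		/* new macro: rounds up to one backing page	*/
		return DIV_ROUND_UP(pgd_size, 1UL << pg_shift);	/* == 1 */
	}
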
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index f59f857..a4ba851 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1376,6 +1376,13 @@
 	struct kmem_cache *p;
 	const unsigned long flags = SLAB_HWCACHE_ALIGN;
 	size_t align = 1 << 10; /* L2 pagetable alignment */
+	struct device_node *np;
+
+	np = of_find_matching_node(NULL, omap_iommu_of_match);
+	if (!np)
+		return 0;
+
+	of_node_put(np);
 
 	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
 			      iopte_cachep_ctor);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 6a8b1ec..9f74fdd 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1015,8 +1015,15 @@
 
 static int __init rk_iommu_init(void)
 {
+	struct device_node *np;
 	int ret;
 
+	np = of_find_matching_node(NULL, rk_iommu_dt_ids);
+	if (!np)
+		return 0;
+
+	of_node_put(np);
+
 	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
 	if (ret)
 		return ret;
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 463c235..4387dae 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -69,6 +69,7 @@
 static void __iomem *main_int_base;
 static struct irq_domain *armada_370_xp_mpic_domain;
 static u32 doorbell_mask_reg;
+static int parent_irq;
 #ifdef CONFIG_PCI_MSI
 static struct irq_domain *armada_370_xp_msi_domain;
 static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
@@ -356,6 +357,7 @@
 {
 	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 		armada_xp_mpic_smp_cpu_init();
+
 	return NOTIFY_OK;
 }
 
@@ -364,6 +366,20 @@
 	.priority = 100,
 };
 
+static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+		enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block mpic_cascaded_cpu_notifier = {
+	.notifier_call = mpic_cascaded_secondary_init,
+	.priority = 100,
+};
+
 #endif /* CONFIG_SMP */
 
 static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
@@ -539,7 +555,7 @@
 					     struct device_node *parent)
 {
 	struct resource main_int_res, per_cpu_int_res;
-	int parent_irq, nr_irqs, i;
+	int nr_irqs, i;
 	u32 control;
 
 	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -587,6 +603,9 @@
 		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
 #endif
 	} else {
+#ifdef CONFIG_SMP
+		register_cpu_notifier(&mpic_cascaded_cpu_notifier);
+#endif
 		irq_set_chained_handler(parent_irq,
 					armada_370_xp_mpic_handle_cascade_irq);
 	}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d8996bd..596b0a9 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -416,13 +416,14 @@
 {
 	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
 	struct its_collection *sync_col;
+	unsigned long flags;
 
-	raw_spin_lock(&its->lock);
+	raw_spin_lock_irqsave(&its->lock, flags);
 
 	cmd = its_allocate_entry(its);
 	if (!cmd) {		/* We're soooooo screewed... */
 		pr_err_ratelimited("ITS can't allocate, dropping command\n");
-		raw_spin_unlock(&its->lock);
+		raw_spin_unlock_irqrestore(&its->lock, flags);
 		return;
 	}
 	sync_col = builder(cmd, desc);
@@ -442,7 +443,7 @@
 
 post:
 	next_cmd = its_post_commands(its);
-	raw_spin_unlock(&its->lock);
+	raw_spin_unlock_irqrestore(&its->lock, flags);
 
 	its_wait_for_range_completion(its, cmd, next_cmd);
 }
@@ -799,21 +800,43 @@
 {
 	int err;
 	int i;
-	int psz = PAGE_SIZE;
+	int psz = SZ_64K;
 	u64 shr = GITS_BASER_InnerShareable;
 
 	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
 		u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
 		u64 type = GITS_BASER_TYPE(val);
 		u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
+		int order = get_order(psz);
+		int alloc_size;
 		u64 tmp;
 		void *base;
 
 		if (type == GITS_BASER_TYPE_NONE)
 			continue;
 
-		/* We're lazy and only allocate a single page for now */
-		base = (void *)get_zeroed_page(GFP_KERNEL);
+		/*
+		 * Allocate as many entries as required to fit the
+		 * range of device IDs that the ITS can grok... The ID
+		 * space being incredibly sparse, this results in a
+		 * massive waste of memory.
+		 *
+		 * For other tables, only allocate a single page.
+		 */
+		if (type == GITS_BASER_TYPE_DEVICE) {
+			u64 typer = readq_relaxed(its->base + GITS_TYPER);
+			u32 ids = GITS_TYPER_DEVBITS(typer);
+
+			order = get_order((1UL << ids) * entry_size);
+			if (order >= MAX_ORDER) {
+				order = MAX_ORDER - 1;
+				pr_warn("%s: Device Table too large, reduce its page order to %u\n",
+					its->msi_chip.of_node->full_name, order);
+			}
+		}
+
+		alloc_size = (1 << order) * PAGE_SIZE;
+		base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 		if (!base) {
 			err = -ENOMEM;
 			goto out_free;
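
The device table must now cover the whole DeviceID space advertised in
GITS_TYPER instead of a single page. A rough worked example with assumed
numbers: 17 DeviceID bits and an 8-byte entry give (1 << 17) * 8 = 1 MiB,
i.e. order 8 with 4 KiB pages; an implementation advertising the full 32 bits
would exceed MAX_ORDER and be capped with the warning above.

	#include <linux/types.h>
	#include <asm/page.h>		/* get_order(), PAGE_SIZE */

	/* assumed: GITS_TYPER_DEVBITS() == 17, 8-byte table entries, 4K pages */
	static int device_table_order_example(void)
	{
		u32 ids = 17;
		u64 entry_size = 8;

		return get_order((1UL << ids) * entry_size);	/* == 8 */
	}
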
@@ -841,7 +864,7 @@
 			break;
 		}
 
-		val |= (PAGE_SIZE / psz) - 1;
+		val |= (alloc_size / psz) - 1;
 
 		writeq_relaxed(val, its->base + GITS_BASER + i * 8);
 		tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -882,7 +905,7 @@
 		}
 
 		pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
-			(int)(PAGE_SIZE / entry_size),
+			(int)(alloc_size / entry_size),
 			its_base_type_string[type],
 			(unsigned long)virt_to_phys(base),
 			psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
@@ -1020,8 +1043,9 @@
 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
 {
 	struct its_device *its_dev = NULL, *tmp;
+	unsigned long flags;
 
-	raw_spin_lock(&its->lock);
+	raw_spin_lock_irqsave(&its->lock, flags);
 
 	list_for_each_entry(tmp, &its->its_device_list, entry) {
 		if (tmp->device_id == dev_id) {
@@ -1030,7 +1054,7 @@
 		}
 	}
 
-	raw_spin_unlock(&its->lock);
+	raw_spin_unlock_irqrestore(&its->lock, flags);
 
 	return its_dev;
 }
@@ -1040,6 +1064,7 @@
 {
 	struct its_device *dev;
 	unsigned long *lpi_map;
+	unsigned long flags;
 	void *itt;
 	int lpi_base;
 	int nr_lpis;
@@ -1056,7 +1081,7 @@
 	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
 	sz = nr_ites * its->ite_size;
 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
-	itt = kmalloc(sz, GFP_KERNEL);
+	itt = kzalloc(sz, GFP_KERNEL);
 	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
 
 	if (!dev || !itt || !lpi_map) {
@@ -1075,9 +1100,9 @@
 	dev->device_id = dev_id;
 	INIT_LIST_HEAD(&dev->entry);
 
-	raw_spin_lock(&its->lock);
+	raw_spin_lock_irqsave(&its->lock, flags);
 	list_add(&dev->entry, &its->its_device_list);
-	raw_spin_unlock(&its->lock);
+	raw_spin_unlock_irqrestore(&its->lock, flags);
 
 	/* Bind the device to the first possible CPU */
 	cpu = cpumask_first(cpu_online_mask);
@@ -1091,9 +1116,11 @@
 
 static void its_free_device(struct its_device *its_dev)
 {
-	raw_spin_lock(&its_dev->its->lock);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
 	list_del(&its_dev->entry);
-	raw_spin_unlock(&its_dev->its->lock);
+	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
 	kfree(its_dev->itt);
 	kfree(its_dev);
 }
@@ -1112,31 +1139,69 @@
 	return 0;
 }
 
+struct its_pci_alias {
+	struct pci_dev	*pdev;
+	u32		dev_id;
+	u32		count;
+};
+
+static int its_pci_msi_vec_count(struct pci_dev *pdev)
+{
+	int msi, msix;
+
+	msi = max(pci_msi_vec_count(pdev), 0);
+	msix = max(pci_msix_vec_count(pdev), 0);
+
+	return max(msi, msix);
+}
+
+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+	struct its_pci_alias *dev_alias = data;
+
+	dev_alias->dev_id = alias;
+	if (pdev != dev_alias->pdev)
+		dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+
+	return 0;
+}
+
 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
 			   int nvec, msi_alloc_info_t *info)
 {
 	struct pci_dev *pdev;
 	struct its_node *its;
-	u32 dev_id;
 	struct its_device *its_dev;
+	struct its_pci_alias dev_alias;
 
 	if (!dev_is_pci(dev))
 		return -EINVAL;
 
 	pdev = to_pci_dev(dev);
-	dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+	dev_alias.pdev = pdev;
+	dev_alias.count = nvec;
+
+	pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
 	its = domain->parent->host_data;
 
-	its_dev = its_find_device(its, dev_id);
-	if (WARN_ON(its_dev))
-		return -EINVAL;
+	its_dev = its_find_device(its, dev_alias.dev_id);
+	if (its_dev) {
+		/*
+		 * We already have seen this ID, probably through
+		 * We have already seen this ID, probably through
+		 * create the device.
+		 */
+		dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id);
+		goto out;
+	}
 
-	its_dev = its_create_device(its, dev_id, nvec);
+	its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count);
 	if (!its_dev)
 		return -ENOMEM;
 
-	dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec));
-
+	dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n",
+		dev_alias.count, ilog2(dev_alias.count));
+out:
 	info->scratchpad[0].ptr = its_dev;
 	info->scratchpad[1].ptr = dev;
 	return 0;
@@ -1255,6 +1320,34 @@
 	.deactivate		= its_irq_domain_deactivate,
 };
 
+static int its_force_quiescent(void __iomem *base)
+{
+	u32 count = 1000000;	/* 1s */
+	u32 val;
+
+	val = readl_relaxed(base + GITS_CTLR);
+	if (val & GITS_CTLR_QUIESCENT)
+		return 0;
+
+	/* Disable the generation of all interrupts to this ITS */
+	val &= ~GITS_CTLR_ENABLE;
+	writel_relaxed(val, base + GITS_CTLR);
+
+	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
+	while (1) {
+		val = readl_relaxed(base + GITS_CTLR);
+		if (val & GITS_CTLR_QUIESCENT)
+			return 0;
+
+		count--;
+		if (!count)
+			return -EBUSY;
+
+		cpu_relax();
+		udelay(1);
+	}
+}
+
 static int its_probe(struct device_node *node, struct irq_domain *parent)
 {
 	struct resource res;
@@ -1283,6 +1376,13 @@
 		goto out_unmap;
 	}
 
+	err = its_force_quiescent(its_base);
+	if (err) {
+		pr_warn("%s: failed to quiesce, giving up\n",
+			node->full_name);
+		goto out_unmap;
+	}
+
 	pr_info("ITS: %s\n", node->full_name);
 
 	its = kzalloc(sizeof(*its), GFP_KERNEL);
@@ -1323,7 +1423,7 @@
 	writeq_relaxed(baser, its->base + GITS_CBASER);
 	tmp = readq_relaxed(its->base + GITS_CBASER);
 	writeq_relaxed(0, its->base + GITS_CWRITER);
-	writel_relaxed(1, its->base + GITS_CTLR);
+	writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
 
 	if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
 		pr_info("ITS: using cache flushing for cmd queue\n");
@@ -1382,12 +1482,11 @@
 
 int its_cpu_init(void)
 {
-	if (!gic_rdists_supports_plpis()) {
-		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
-		return -ENXIO;
-	}
-
 	if (!list_empty(&its_nodes)) {
+		if (!gic_rdists_supports_plpis()) {
+			pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
+			return -ENXIO;
+		}
 		its_cpu_init_lpis();
 		its_cpu_init_collection();
 	}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1c6dea2..fd8850d 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -466,7 +466,7 @@
 		tlist |= 1 << (mpidr & 0xf);
 
 		cpu = cpumask_next(cpu, mask);
-		if (cpu == nr_cpu_ids)
+		if (cpu >= nr_cpu_ids)
 			goto out;
 
 		mpidr = cpu_logical_map(cpu);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4634cf7..471e1cdc1 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -154,23 +154,25 @@
 static void gic_mask_irq(struct irq_data *d)
 {
 	u32 mask = 1 << (gic_irq(d) % 32);
+	unsigned long flags;
 
-	raw_spin_lock(&irq_controller_lock);
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
 	if (gic_arch_extn.irq_mask)
 		gic_arch_extn.irq_mask(d);
-	raw_spin_unlock(&irq_controller_lock);
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
 
 static void gic_unmask_irq(struct irq_data *d)
 {
 	u32 mask = 1 << (gic_irq(d) % 32);
+	unsigned long flags;
 
-	raw_spin_lock(&irq_controller_lock);
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 	if (gic_arch_extn.irq_unmask)
 		gic_arch_extn.irq_unmask(d);
 	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
-	raw_spin_unlock(&irq_controller_lock);
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
 
 static void gic_eoi_irq(struct irq_data *d)
@@ -188,6 +190,7 @@
 {
 	void __iomem *base = gic_dist_base(d);
 	unsigned int gicirq = gic_irq(d);
+	unsigned long flags;
 	int ret;
 
 	/* Interrupt configuration for SGIs can't be changed */
@@ -199,14 +202,14 @@
 			    type != IRQ_TYPE_EDGE_RISING)
 		return -EINVAL;
 
-	raw_spin_lock(&irq_controller_lock);
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
 	if (gic_arch_extn.irq_set_type)
 		gic_arch_extn.irq_set_type(d, type);
 
 	ret = gic_configure_irq(gicirq, type, base, NULL);
 
-	raw_spin_unlock(&irq_controller_lock);
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 
 	return ret;
 }
@@ -227,6 +230,7 @@
 	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
 	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
 	u32 val, mask, bit;
+	unsigned long flags;
 
 	if (!force)
 		cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -236,12 +240,12 @@
 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	raw_spin_lock(&irq_controller_lock);
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
 	mask = 0xff << shift;
 	bit = gic_cpu_map[cpu] << shift;
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
-	raw_spin_unlock(&irq_controller_lock);
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 
 	return IRQ_SET_MASK_OK;
 }
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 1daa7ca..9acdc08 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -192,14 +192,6 @@
 	}
 }
 
-unsigned int gic_get_timer_pending(void)
-{
-	unsigned int vpe_pending;
-
-	vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
-	return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
-}
-
 static void gic_bind_eic_interrupt(int irq, int set)
 {
 	/* Convert irq vector # to hw int # */
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 3c92780..ff48da6 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1755,7 +1755,7 @@
 		enable_hwirq(hc);
 		spin_unlock_irqrestore(&hc->lock, flags);
 		/* Timeout 80ms */
-		current->state = TASK_UNINTERRUPTIBLE;
+		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout((80 * HZ) / 1000);
 		printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
 		       hc->irq, hc->irqcnt);
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 6a7447c..358a574 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1609,7 +1609,7 @@
 	if (ints[0] > 1)
 		membase = (unsigned long)ints[2];
 	if (str && *str) {
-		strcpy(sid, str);
+		strlcpy(sid, str, sizeof(sid));
 		icn_id = sid;
 		if ((p = strchr(sid, ','))) {
 			*p++ = 0;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 08981be..713a962 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -18,9 +18,11 @@
 #include <linux/slab.h>
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
+#include <linux/kthread.h>
 #include <linux/backing-dev.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
+#include <linux/rbtree.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
 #include <crypto/hash.h>
@@ -58,7 +60,8 @@
 	atomic_t io_pending;
 	int error;
 	sector_t sector;
-	struct dm_crypt_io *base_io;
+
+	struct rb_node rb_node;
 } CRYPTO_MINALIGN_ATTR;
 
 struct dm_crypt_request {
@@ -108,7 +111,8 @@
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
-enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
+	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
 
 /*
  * The fields in here must be read only after initialization.
@@ -121,14 +125,18 @@
 	 * pool for per bio private data, crypto requests and
 	 * encryption requests/buffer pages
 	 */
-	mempool_t *io_pool;
 	mempool_t *req_pool;
 	mempool_t *page_pool;
 	struct bio_set *bs;
+	struct mutex bio_alloc_lock;
 
 	struct workqueue_struct *io_queue;
 	struct workqueue_struct *crypt_queue;
 
+	struct task_struct *write_thread;
+	wait_queue_head_t write_thread_wait;
+	struct rb_root write_tree;
+
 	char *cipher;
 	char *cipher_string;
 
@@ -172,9 +180,6 @@
 };
 
 #define MIN_IOS        16
-#define MIN_POOL_PAGES 32
-
-static struct kmem_cache *_crypt_io_pool;
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -946,57 +951,70 @@
 	return 0;
 }
 
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
+ *
+ * This function may be called concurrently. If we allocate from the mempool
+ * concurrently, there is a possibility of deadlock. For example, if we have
+ * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
+ * from the mempool concurrently, it may deadlock in a situation where both
+ * processes have allocated 128 pages and the mempool is exhausted.
+ *
+ * In order to avoid this scenario we allocate the pages under a mutex.
+ *
+ * To avoid degrading performance with excessive locking, we first try
+ * non-blocking allocations without the mutex; on failure, we fall back to
+ * blocking allocations with the mutex held.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-				      unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-	unsigned i, len;
+	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
+	unsigned i, len, remaining_size;
 	struct page *page;
+	struct bio_vec *bvec;
+
+retry:
+	if (unlikely(gfp_mask & __GFP_WAIT))
+		mutex_lock(&cc->bio_alloc_lock);
 
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
-		return NULL;
+		goto return_clone;
 
 	clone_init(io, clone);
-	*out_of_pages = 0;
+
+	remaining_size = size;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
 		if (!page) {
-			*out_of_pages = 1;
-			break;
+			crypt_free_buffer_pages(cc, clone);
+			bio_put(clone);
+			gfp_mask |= __GFP_WAIT;
+			goto retry;
 		}
 
-		/*
-		 * If additional pages cannot be allocated without waiting,
-		 * return a partially-allocated bio.  The caller will then try
-		 * to allocate more bios while submitting this partial bio.
-		 */
-		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
 
-		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
+		bvec->bv_page = page;
+		bvec->bv_len = len;
+		bvec->bv_offset = 0;
 
-		if (!bio_add_page(clone, page, len, 0)) {
-			mempool_free(page, cc->page_pool);
-			break;
-		}
+		clone->bi_iter.bi_size += len;
 
-		size -= len;
+		remaining_size -= len;
 	}
 
-	if (!clone->bi_iter.bi_size) {
-		bio_put(clone);
-		return NULL;
-	}
+return_clone:
+	if (unlikely(gfp_mask & __GFP_WAIT))
+		mutex_unlock(&cc->bio_alloc_lock);
 
 	return clone;
 }
@@ -1020,7 +1038,6 @@
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
-	io->base_io = NULL;
 	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 }
@@ -1033,13 +1050,11 @@
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
-	struct dm_crypt_io *base_io = io->base_io;
 	int error = io->error;
 
 	if (!atomic_dec_and_test(&io->io_pending))
@@ -1047,16 +1062,8 @@
 
 	if (io->ctx.req)
 		crypt_free_req(cc, io->ctx.req, base_bio);
-	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
-		mempool_free(io, cc->io_pool);
 
-	if (likely(!base_io))
-		bio_endio(base_bio, error);
-	else {
-		if (error && !base_io->error)
-			base_io->error = error;
-		crypt_dec_pending(base_io);
-	}
+	bio_endio(base_bio, error);
 }
 
 /*
@@ -1138,37 +1145,97 @@
 	return 0;
 }
 
-static void kcryptd_io_write(struct dm_crypt_io *io)
-{
-	struct bio *clone = io->ctx.bio_out;
-	generic_make_request(clone);
-}
-
-static void kcryptd_io(struct work_struct *work)
+static void kcryptd_io_read_work(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
-	if (bio_data_dir(io->base_bio) == READ) {
-		crypt_inc_pending(io);
-		if (kcryptd_io_read(io, GFP_NOIO))
-			io->error = -ENOMEM;
-		crypt_dec_pending(io);
-	} else
-		kcryptd_io_write(io);
+	crypt_inc_pending(io);
+	if (kcryptd_io_read(io, GFP_NOIO))
+		io->error = -ENOMEM;
+	crypt_dec_pending(io);
 }
 
-static void kcryptd_queue_io(struct dm_crypt_io *io)
+static void kcryptd_queue_read(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 
-	INIT_WORK(&io->work, kcryptd_io);
+	INIT_WORK(&io->work, kcryptd_io_read_work);
 	queue_work(cc->io_queue, &io->work);
 }
 
+static void kcryptd_io_write(struct dm_crypt_io *io)
+{
+	struct bio *clone = io->ctx.bio_out;
+
+	generic_make_request(clone);
+}
+
+#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
+
+static int dmcrypt_write(void *data)
+{
+	struct crypt_config *cc = data;
+	struct dm_crypt_io *io;
+
+	while (1) {
+		struct rb_root write_tree;
+		struct blk_plug plug;
+
+		DECLARE_WAITQUEUE(wait, current);
+
+		spin_lock_irq(&cc->write_thread_wait.lock);
+continue_locked:
+
+		if (!RB_EMPTY_ROOT(&cc->write_tree))
+			goto pop_from_list;
+
+		__set_current_state(TASK_INTERRUPTIBLE);
+		__add_wait_queue(&cc->write_thread_wait, &wait);
+
+		spin_unlock_irq(&cc->write_thread_wait.lock);
+
+		if (unlikely(kthread_should_stop())) {
+			set_task_state(current, TASK_RUNNING);
+			remove_wait_queue(&cc->write_thread_wait, &wait);
+			break;
+		}
+
+		schedule();
+
+		set_task_state(current, TASK_RUNNING);
+		spin_lock_irq(&cc->write_thread_wait.lock);
+		__remove_wait_queue(&cc->write_thread_wait, &wait);
+		goto continue_locked;
+
+pop_from_list:
+		write_tree = cc->write_tree;
+		cc->write_tree = RB_ROOT;
+		spin_unlock_irq(&cc->write_thread_wait.lock);
+
+		BUG_ON(rb_parent(write_tree.rb_node));
+
+		/*
+		 * Note: we cannot walk the tree here with rb_next because
+		 * the structures may be freed when kcryptd_io_write is called.
+		 */
+		blk_start_plug(&plug);
+		do {
+			io = crypt_io_from_node(rb_first(&write_tree));
+			rb_erase(&io->rb_node, &write_tree);
+			kcryptd_io_write(io);
+		} while (!RB_EMPTY_ROOT(&write_tree));
+		blk_finish_plug(&plug);
+	}
+	return 0;
+}
+
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 {
 	struct bio *clone = io->ctx.bio_out;
 	struct crypt_config *cc = io->cc;
+	unsigned long flags;
+	sector_t sector;
+	struct rb_node **rbp, *parent;
 
 	if (unlikely(io->error < 0)) {
 		crypt_free_buffer_pages(cc, clone);
@@ -1182,20 +1249,34 @@
 
 	clone->bi_iter.bi_sector = cc->start + io->sector;
 
-	if (async)
-		kcryptd_queue_io(io);
-	else
+	if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
 		generic_make_request(clone);
+		return;
+	}
+
+	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
+	rbp = &cc->write_tree.rb_node;
+	parent = NULL;
+	sector = io->sector;
+	while (*rbp) {
+		parent = *rbp;
+		if (sector < crypt_io_from_node(parent)->sector)
+			rbp = &(*rbp)->rb_left;
+		else
+			rbp = &(*rbp)->rb_right;
+	}
+	rb_link_node(&io->rb_node, parent, rbp);
+	rb_insert_color(&io->rb_node, &cc->write_tree);
+
+	wake_up_locked(&cc->write_thread_wait);
+	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
 }
 
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
-	struct dm_crypt_io *new_io;
 	int crypt_finished;
-	unsigned out_of_pages = 0;
-	unsigned remaining = io->base_bio->bi_iter.bi_size;
 	sector_t sector = io->sector;
 	int r;
 
@@ -1205,80 +1286,30 @@
 	crypt_inc_pending(io);
 	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
 
-	/*
-	 * The allocated buffers can be smaller than the whole bio,
-	 * so repeat the whole process until all the data can be handled.
-	 */
-	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
-		if (unlikely(!clone)) {
-			io->error = -ENOMEM;
-			break;
-		}
-
-		io->ctx.bio_out = clone;
-		io->ctx.iter_out = clone->bi_iter;
-
-		remaining -= clone->bi_iter.bi_size;
-		sector += bio_sectors(clone);
-
-		crypt_inc_pending(io);
-
-		r = crypt_convert(cc, &io->ctx);
-		if (r < 0)
-			io->error = -EIO;
-
-		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-
-		/* Encryption was already finished, submit io now */
-		if (crypt_finished) {
-			kcryptd_crypt_write_io_submit(io, 0);
-
-			/*
-			 * If there was an error, do not try next fragments.
-			 * For async, error is processed in async handler.
-			 */
-			if (unlikely(r < 0))
-				break;
-
-			io->sector = sector;
-		}
-
-		/*
-		 * Out of memory -> run queues
-		 * But don't wait if split was due to the io size restriction
-		 */
-		if (unlikely(out_of_pages))
-			congestion_wait(BLK_RW_ASYNC, HZ/100);
-
-		/*
-		 * With async crypto it is unsafe to share the crypto context
-		 * between fragments, so switch to a new dm_crypt_io structure.
-		 */
-		if (unlikely(!crypt_finished && remaining)) {
-			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
-			crypt_io_init(new_io, io->cc, io->base_bio, sector);
-			crypt_inc_pending(new_io);
-			crypt_convert_init(cc, &new_io->ctx, NULL,
-					   io->base_bio, sector);
-			new_io->ctx.iter_in = io->ctx.iter_in;
-
-			/*
-			 * Fragments after the first use the base_io
-			 * pending count.
-			 */
-			if (!io->base_io)
-				new_io->base_io = io;
-			else {
-				new_io->base_io = io->base_io;
-				crypt_inc_pending(io->base_io);
-				crypt_dec_pending(io);
-			}
-
-			io = new_io;
-		}
+	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+	if (unlikely(!clone)) {
+		io->error = -EIO;
+		goto dec;
 	}
 
+	io->ctx.bio_out = clone;
+	io->ctx.iter_out = clone->bi_iter;
+
+	sector += bio_sectors(clone);
+
+	crypt_inc_pending(io);
+	r = crypt_convert(cc, &io->ctx);
+	if (r)
+		io->error = -EIO;
+	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
+
+	/* Encryption was already finished, submit io now */
+	if (crypt_finished) {
+		kcryptd_crypt_write_io_submit(io, 0);
+		io->sector = sector;
+	}
+
+dec:
 	crypt_dec_pending(io);
 }
 
@@ -1481,6 +1512,9 @@
 	if (!cc)
 		return;
 
+	if (cc->write_thread)
+		kthread_stop(cc->write_thread);
+
 	if (cc->io_queue)
 		destroy_workqueue(cc->io_queue);
 	if (cc->crypt_queue)
@@ -1495,8 +1529,6 @@
 		mempool_destroy(cc->page_pool);
 	if (cc->req_pool)
 		mempool_destroy(cc->req_pool);
-	if (cc->io_pool)
-		mempool_destroy(cc->io_pool);
 
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
@@ -1688,7 +1720,7 @@
 	char dummy;
 
 	static struct dm_arg _args[] = {
-		{0, 1, "Invalid number of feature args"},
+		{0, 3, "Invalid number of feature args"},
 	};
 
 	if (argc < 5) {
@@ -1710,13 +1742,6 @@
 	if (ret < 0)
 		goto bad;
 
-	ret = -ENOMEM;
-	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
-	if (!cc->io_pool) {
-		ti->error = "Cannot allocate crypt io mempool";
-		goto bad;
-	}
-
 	cc->dmreq_start = sizeof(struct ablkcipher_request);
 	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
 	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
@@ -1734,6 +1759,7 @@
 		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
 	}
 
+	ret = -ENOMEM;
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
 			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
 	if (!cc->req_pool) {
@@ -1746,7 +1772,7 @@
 		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
 		      ARCH_KMALLOC_MINALIGN);
 
-	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
 		goto bad;
@@ -1758,6 +1784,8 @@
 		goto bad;
 	}
 
+	mutex_init(&cc->bio_alloc_lock);
+
 	ret = -EINVAL;
 	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
 		ti->error = "Invalid iv_offset sector";
@@ -1788,15 +1816,26 @@
 		if (ret)
 			goto bad;
 
-		opt_string = dm_shift_arg(&as);
+		while (opt_params--) {
+			opt_string = dm_shift_arg(&as);
+			if (!opt_string) {
+				ti->error = "Not enough feature arguments";
+				goto bad;
+			}
 
-		if (opt_params == 1 && opt_string &&
-		    !strcasecmp(opt_string, "allow_discards"))
-			ti->num_discard_bios = 1;
-		else if (opt_params) {
-			ret = -EINVAL;
-			ti->error = "Invalid feature arguments";
-			goto bad;
+			if (!strcasecmp(opt_string, "allow_discards"))
+				ti->num_discard_bios = 1;
+
+			else if (!strcasecmp(opt_string, "same_cpu_crypt"))
+				set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+
+			else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
+				set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+
+			else {
+				ti->error = "Invalid feature arguments";
+				goto bad;
+			}
 		}
 	}
 
@@ -1807,13 +1846,28 @@
 		goto bad;
 	}
 
-	cc->crypt_queue = alloc_workqueue("kcryptd",
-					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+	else
+		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+						  num_online_cpus());
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
 		goto bad;
 	}
 
+	init_waitqueue_head(&cc->write_thread_wait);
+	cc->write_tree = RB_ROOT;
+
+	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
+	if (IS_ERR(cc->write_thread)) {
+		ret = PTR_ERR(cc->write_thread);
+		cc->write_thread = NULL;
+		ti->error = "Couldn't spawn write thread";
+		goto bad;
+	}
+	wake_up_process(cc->write_thread);
+
 	ti->num_flush_bios = 1;
 	ti->discard_zeroes_data_unsupported = true;
 
@@ -1848,7 +1902,7 @@
 
 	if (bio_data_dir(io->base_bio) == READ) {
 		if (kcryptd_io_read(io, GFP_NOWAIT))
-			kcryptd_queue_io(io);
+			kcryptd_queue_read(io);
 	} else
 		kcryptd_queue_crypt(io);
 
@@ -1860,6 +1914,7 @@
 {
 	struct crypt_config *cc = ti->private;
 	unsigned i, sz = 0;
+	int num_feature_args = 0;
 
 	switch (type) {
 	case STATUSTYPE_INFO:
@@ -1878,8 +1933,18 @@
 		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
 				cc->dev->name, (unsigned long long)cc->start);
 
-		if (ti->num_discard_bios)
-			DMEMIT(" 1 allow_discards");
+		num_feature_args += !!ti->num_discard_bios;
+		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+		if (num_feature_args) {
+			DMEMIT(" %d", num_feature_args);
+			if (ti->num_discard_bios)
+				DMEMIT(" allow_discards");
+			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+				DMEMIT(" same_cpu_crypt");
+			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
+				DMEMIT(" submit_from_crypt_cpus");
+		}
 
 		break;
 	}
@@ -1976,7 +2041,7 @@
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 13, 0},
+	.version = {1, 14, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
@@ -1994,15 +2059,9 @@
 {
 	int r;
 
-	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
-	if (!_crypt_io_pool)
-		return -ENOMEM;
-
 	r = dm_register_target(&crypt_target);
-	if (r < 0) {
+	if (r < 0)
 		DMERR("register failed %d", r);
-		kmem_cache_destroy(_crypt_io_pool);
-	}
 
 	return r;
 }
@@ -2010,7 +2069,6 @@
 static void __exit dm_crypt_exit(void)
 {
 	dm_unregister_target(&crypt_target);
-	kmem_cache_destroy(_crypt_io_pool);
 }
 
 module_init(dm_crypt_init);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index c09359d..37de017 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -290,6 +290,12 @@
 	unsigned short logical_block_size = queue_logical_block_size(q);
 	sector_t num_sectors;
 
+	/* Reject unsupported discard requests */
+	if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+		dec_count(io, region, -EOPNOTSUPP);
+		return;
+	}
+
 	/*
 	 * where->count may be zero if rw holds a flush and we need to
 	 * send a zero-sized flush.
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 7dfdb5c..089d627 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -604,6 +604,15 @@
 		return;
 	}
 
+	/*
+	 * If the bio is a discard, return an error, but do not
+	 * degrade the array.
+	 */
+	if (bio->bi_rw & REQ_DISCARD) {
+		bio_endio(bio, -EOPNOTSUPP);
+		return;
+	}
+
 	for (i = 0; i < ms->nr_mirrors; i++)
 		if (test_bit(i, &error))
 			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 864b03f..8b204ae2 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1432,8 +1432,6 @@
 		full_bio->bi_private = pe->full_bio_private;
 		atomic_inc(&full_bio->bi_remaining);
 	}
-	free_pending_exception(pe);
-
 	increment_pending_exceptions_done_count();
 
 	up_write(&s->lock);
@@ -1450,6 +1448,8 @@
 	}
 
 	retry_origin_bios(s, origin_bios);
+
+	free_pending_exception(pe);
 }
 
 static void commit_callback(void *context, int success)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ec1444f..73f2880 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2571,7 +2571,7 @@
 	return 0;
 }
 
-static struct mapped_device *dm_find_md(dev_t dev)
+struct mapped_device *dm_get_md(dev_t dev)
 {
 	struct mapped_device *md;
 	unsigned minor = MINOR(dev);
@@ -2582,12 +2582,15 @@
 	spin_lock(&_minor_lock);
 
 	md = idr_find(&_minor_idr, minor);
-	if (md && (md == MINOR_ALLOCED ||
-		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
-		   dm_deleting_md(md) ||
-		   test_bit(DMF_FREEING, &md->flags))) {
-		md = NULL;
-		goto out;
+	if (md) {
+		if ((md == MINOR_ALLOCED ||
+		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
+		     dm_deleting_md(md) ||
+		     test_bit(DMF_FREEING, &md->flags))) {
+			md = NULL;
+			goto out;
+		}
+		dm_get(md);
 	}
 
 out:
@@ -2595,16 +2598,6 @@
 
 	return md;
 }
-
-struct mapped_device *dm_get_md(dev_t dev)
-{
-	struct mapped_device *md = dm_find_md(dev);
-
-	if (md)
-		dm_get(md);
-
-	return md;
-}
 EXPORT_SYMBOL_GPL(dm_get_md);
 
 void *dm_get_mdptr(struct mapped_device *md)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c8d2bac..cadf9cc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2555,7 +2555,7 @@
 	return err ? err : len;
 }
 static struct rdev_sysfs_entry rdev_state =
-__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
+__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
 
 static ssize_t
 errors_show(struct md_rdev *rdev, char *page)
@@ -3638,7 +3638,8 @@
 	return err ?: len;
 }
 static struct md_sysfs_entry md_resync_start =
-__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
+__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
+		resync_start_show, resync_start_store);
 
 /*
  * The array state can be:
@@ -3851,7 +3852,7 @@
 	return err ?: len;
 }
 static struct md_sysfs_entry md_array_state =
-__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
+__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
 
 static ssize_t
 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
@@ -4101,7 +4102,7 @@
 }
 
 static struct md_sysfs_entry md_metadata =
-__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
 
 static ssize_t
 action_show(struct mddev *mddev, char *page)
@@ -4189,7 +4190,7 @@
 }
 
 static struct md_sysfs_entry md_scan_mode =
-__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
+__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
 
 static ssize_t
 last_sync_action_show(struct mddev *mddev, char *page)
@@ -4335,7 +4336,8 @@
 	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
 }
 
-static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
+static struct md_sysfs_entry md_sync_completed =
+	__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
 
 static ssize_t
 min_sync_show(struct mddev *mddev, char *page)
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index cfbf961..ebb280a 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -78,7 +78,9 @@
 	if (r)
 		return r;
 
-	return count > 1;
+	*result = count > 1;
+
+	return 0;
 }
 
 static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4153da5..d34e238 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -560,7 +560,7 @@
 		if (test_bit(WriteMostly, &rdev->flags)) {
 			/* Don't balance among write-mostly, just
 			 * use the first as a last resort */
-			if (best_disk < 0) {
+			if (best_dist_disk < 0) {
 				if (is_badblock(rdev, this_sector, sectors,
 						&first_bad, &bad_sectors)) {
 					if (first_bad < this_sector)
@@ -569,7 +569,8 @@
 					best_good_sectors = first_bad - this_sector;
 				} else
 					best_good_sectors = sectors;
-				best_disk = disk;
+				best_dist_disk = disk;
+				best_pending_disk = disk;
 			}
 			continue;
 		}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e75d48c..cd2f96b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5121,12 +5121,17 @@
 		schedule_timeout_uninterruptible(1);
 	}
 	/* Need to check if array will still be degraded after recovery/resync
-	 * We don't need to check the 'failed' flag as when that gets set,
-	 * recovery aborts.
+	 * Note that in the case of > 1 drive failures it's possible we're
+	 * rebuilding one drive while leaving another faulty drive in the array.
 	 */
-	for (i = 0; i < conf->raid_disks; i++)
-		if (conf->disks[i].rdev == NULL)
+	rcu_read_lock();
+	for (i = 0; i < conf->raid_disks; i++) {
+		struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+
+		if (rdev == NULL || test_bit(Faulty, &rdev->flags))
 			still_degraded = 1;
+	}
+	rcu_read_unlock();
 
 	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
 
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 9306219..6ad049a 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -341,6 +341,8 @@
 
 	dev->dev_state = MEI_DEV_POWER_DOWN;
 	mei_reset(dev);
+	/* move device to disabled state unconditionally */
+	dev->dev_state = MEI_DEV_DISABLED;
 
 	mutex_unlock(&dev->device_lock);
 
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index e9f1d8d..c53f14a 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -124,7 +124,7 @@
 		    PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
 			ret = PTR_ERR(pwrseq->reset_gpios[i]);
 
-			while (--i)
+			while (i--)
 				gpiod_put(pwrseq->reset_gpios[i]);
 
 			goto clk_put;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 6af0a28..e8a4218 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -21,8 +21,6 @@
 #include <linux/err.h>
 
 #include <linux/clk.h>
-#include <linux/clk/sunxi.h>
-
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
@@ -229,6 +227,8 @@
 	/* clock management */
 	struct clk	*clk_ahb;
 	struct clk	*clk_mmc;
+	struct clk	*clk_sample;
+	struct clk	*clk_output;
 
 	/* irq */
 	spinlock_t	lock;
@@ -653,26 +653,31 @@
 
 	/* determine delays */
 	if (rate <= 400000) {
-		oclk_dly = 0;
-		sclk_dly = 7;
+		oclk_dly = 180;
+		sclk_dly = 42;
 	} else if (rate <= 25000000) {
-		oclk_dly = 0;
-		sclk_dly = 5;
+		oclk_dly = 180;
+		sclk_dly = 75;
 	} else if (rate <= 50000000) {
 		if (ios->timing == MMC_TIMING_UHS_DDR50) {
-			oclk_dly = 2;
-			sclk_dly = 4;
+			oclk_dly = 60;
+			sclk_dly = 120;
 		} else {
-			oclk_dly = 3;
-			sclk_dly = 5;
+			oclk_dly = 90;
+			sclk_dly = 150;
 		}
+	} else if (rate <= 100000000) {
+		oclk_dly = 6;
+		sclk_dly = 24;
+	} else if (rate <= 200000000) {
+		oclk_dly = 3;
+		sclk_dly = 12;
 	} else {
-		/* rate > 50000000 */
-		oclk_dly = 2;
-		sclk_dly = 4;
+		return -EINVAL;
 	}
 
-	clk_sunxi_mmc_phase_control(host->clk_mmc, sclk_dly, oclk_dly);
+	clk_set_phase(host->clk_sample, sclk_dly);
+	clk_set_phase(host->clk_output, oclk_dly);
 
 	return sunxi_mmc_oclk_onoff(host, 1);
 }
@@ -913,6 +918,18 @@
 		return PTR_ERR(host->clk_mmc);
 	}
 
+	host->clk_output = devm_clk_get(&pdev->dev, "output");
+	if (IS_ERR(host->clk_output)) {
+		dev_err(&pdev->dev, "Could not get output clock\n");
+		return PTR_ERR(host->clk_output);
+	}
+
+	host->clk_sample = devm_clk_get(&pdev->dev, "sample");
+	if (IS_ERR(host->clk_sample)) {
+		dev_err(&pdev->dev, "Could not get sample clock\n");
+		return PTR_ERR(host->clk_sample);
+	}
+
 	host->reset = devm_reset_control_get(&pdev->dev, "ahb");
 
 	ret = clk_prepare_enable(host->clk_ahb);
@@ -927,11 +944,23 @@
 		goto error_disable_clk_ahb;
 	}
 
+	ret = clk_prepare_enable(host->clk_output);
+	if (ret) {
+		dev_err(&pdev->dev, "Enable output clk err %d\n", ret);
+		goto error_disable_clk_mmc;
+	}
+
+	ret = clk_prepare_enable(host->clk_sample);
+	if (ret) {
+		dev_err(&pdev->dev, "Enable sample clk err %d\n", ret);
+		goto error_disable_clk_output;
+	}
+
 	if (!IS_ERR(host->reset)) {
 		ret = reset_control_deassert(host->reset);
 		if (ret) {
 			dev_err(&pdev->dev, "reset err %d\n", ret);
-			goto error_disable_clk_mmc;
+			goto error_disable_clk_sample;
 		}
 	}
 
@@ -950,6 +979,10 @@
 error_assert_reset:
 	if (!IS_ERR(host->reset))
 		reset_control_assert(host->reset);
+error_disable_clk_sample:
+	clk_disable_unprepare(host->clk_sample);
+error_disable_clk_output:
+	clk_disable_unprepare(host->clk_output);
 error_disable_clk_mmc:
 	clk_disable_unprepare(host->clk_mmc);
 error_disable_clk_ahb:
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5b76a17..5897d8d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -526,6 +526,7 @@
 
 config MTD_NAND_HISI504
 	tristate "Support for NAND controller on Hisilicon SoC Hip04"
+	depends on HAS_DMA
 	help
 	  Enables support for NAND controller on Hisilicon SoC Hip04.
 
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 96b0b1d..10b1f7a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -480,6 +480,42 @@
 	nand_writel(info, NDCR, ndcr | int_mask);
 }
 
+static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
+{
+	if (info->ecc_bch) {
+		int timeout;
+
+		/*
+		 * According to the datasheet, when reading from NDDB
+		 * with BCH enabled, after each 32-byte read we have
+		 * to make sure that the NDSR.RDDREQ bit is set.
+		 *
+		 * Drain the FIFO eight 32-bit reads at a time, and skip
+		 * the polling on the last read.
+		 */
+		while (len > 8) {
+			__raw_readsl(info->mmio_base + NDDB, data, 8);
+
+			for (timeout = 0;
+			     !(nand_readl(info, NDSR) & NDSR_RDDREQ);
+			     timeout++) {
+				if (timeout >= 5) {
+					dev_err(&info->pdev->dev,
+						"Timeout on RDDREQ while draining the FIFO\n");
+					return;
+				}
+
+				mdelay(1);
+			}
+
+			data += 32;
+			len -= 8;
+		}
+	}
+
+	__raw_readsl(info->mmio_base + NDDB, data, len);
+}
+
 static void handle_data_pio(struct pxa3xx_nand_info *info)
 {
 	unsigned int do_bytes = min(info->data_size, info->chunk_size);
@@ -496,14 +532,14 @@
 				      DIV_ROUND_UP(info->oob_size, 4));
 		break;
 	case STATE_PIO_READING:
-		__raw_readsl(info->mmio_base + NDDB,
-			     info->data_buff + info->data_buff_pos,
-			     DIV_ROUND_UP(do_bytes, 4));
+		drain_fifo(info,
+			   info->data_buff + info->data_buff_pos,
+			   DIV_ROUND_UP(do_bytes, 4));
 
 		if (info->oob_size > 0)
-			__raw_readsl(info->mmio_base + NDDB,
-				     info->oob_buff + info->oob_buff_pos,
-				     DIV_ROUND_UP(info->oob_size, 4));
+			drain_fifo(info,
+				   info->oob_buff + info->oob_buff_pos,
+				   DIV_ROUND_UP(info->oob_size, 4));
 		break;
 	default:
 		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -1572,6 +1608,8 @@
 	int ret, irq, cs;
 
 	pdata = dev_get_platdata(&pdev->dev);
+	if (pdata->num_cs <= 0)
+		return -ENODEV;
 	info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
 			    sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
 	if (!info)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 84673eb..df51d60 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -157,7 +157,7 @@
       making it transparent to the connected L2 switch.
 
       Ipvlan devices can be added using the "ip" command from the
-      iproute2 package starting with the iproute2-X.Y.ZZ release:
+      iproute2 package starting with the iproute2-3.19 release:
 
       "ip link add link <main-dev> [ NAME ] type ipvlan"
 
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 4ce6ca5..dc6b78e 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -40,7 +40,7 @@
 
 config LTPC
 	tristate "Apple/Farallon LocalTalk PC support"
-	depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API
+	depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
 	help
 	  This allows you to use the AppleTalk PC card to connect to LocalTalk
 	  networks. The card is also known as the Farallon PhoneNet PC card.
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 98d73aa..58808f6 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -131,7 +131,7 @@
 
 config CAN_XILINXCAN
 	tristate "Xilinx CAN"
-	depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+	depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST
 	depends on COMMON_CLK && HAS_IOMEM
 	---help---
 	  Xilinx CAN driver. This driver supports both soft AXI CAN IP and
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3c82e02..b0f6924 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -579,6 +579,10 @@
 	skb->pkt_type = PACKET_BROADCAST;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+
 	can_skb_reserve(skb);
 	can_skb_prv(skb)->ifindex = dev->ifindex;
 
@@ -603,6 +607,10 @@
 	skb->pkt_type = PACKET_BROADCAST;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+
 	can_skb_reserve(skb);
 	can_skb_prv(skb)->ifindex = dev->ifindex;
 
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 2928f70..e97a08c 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -14,6 +14,8 @@
  * Copyright (C) 2015 Valeo S.A.
  */
 
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
 #include <linux/completion.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
@@ -466,10 +468,11 @@
 struct kvaser_usb_net_priv {
 	struct can_priv can;
 
-	atomic_t active_tx_urbs;
-	struct usb_anchor tx_submitted;
+	spinlock_t tx_contexts_lock;
+	int active_tx_contexts;
 	struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
 
+	struct usb_anchor tx_submitted;
 	struct completion start_comp, stop_comp;
 
 	struct kvaser_usb *dev;
@@ -584,8 +587,15 @@
 		while (pos <= actual_len - MSG_HEADER_LEN) {
 			tmp = buf + pos;
 
-			if (!tmp->len)
-				break;
+			/* Handle messages crossing the USB endpoint max packet
+			 * size boundary. Check kvaser_usb_read_bulk_callback()
+			 * for further details.
+			 */
+			if (tmp->len == 0) {
+				pos = round_up(pos,
+					       dev->bulk_in->wMaxPacketSize);
+				continue;
+			}
 
 			if (pos + tmp->len > actual_len) {
 				dev_err(dev->udev->dev.parent,
@@ -686,6 +696,7 @@
 	struct kvaser_usb_net_priv *priv;
 	struct sk_buff *skb;
 	struct can_frame *cf;
+	unsigned long flags;
 	u8 channel, tid;
 
 	channel = msg->u.tx_acknowledge_header.channel;
@@ -729,12 +740,15 @@
 
 	stats->tx_packets++;
 	stats->tx_bytes += context->dlc;
+
+	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
 	can_get_echo_skb(priv->netdev, context->echo_index);
-
 	context->echo_index = MAX_TX_URBS;
-	atomic_dec(&priv->active_tx_urbs);
-
+	--priv->active_tx_contexts;
 	netif_wake_queue(priv->netdev);
+
+	spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 }
 
 static void kvaser_usb_simple_msg_callback(struct urb *urb)
@@ -787,7 +801,6 @@
 		netdev_err(netdev, "Error transmitting URB\n");
 		usb_unanchor_urb(urb);
 		usb_free_urb(urb);
-		kfree(buf);
 		return err;
 	}
 
@@ -796,17 +809,6 @@
 	return 0;
 }
 
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
-{
-	int i;
-
-	usb_kill_anchored_urbs(&priv->tx_submitted);
-	atomic_set(&priv->active_tx_urbs, 0);
-
-	for (i = 0; i < MAX_TX_URBS; i++)
-		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
-}
-
 static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
 						 const struct kvaser_usb_error_summary *es,
 						 struct can_frame *cf)
@@ -1317,8 +1319,19 @@
 	while (pos <= urb->actual_length - MSG_HEADER_LEN) {
 		msg = urb->transfer_buffer + pos;
 
-		if (!msg->len)
-			break;
+		/* The Kvaser firmware can only read and write messages that
+		 * do not cross the USB endpoint's wMaxPacketSize boundary.
+		 * If a follow-up command crosses such a boundary, the firmware
+		 * puts a placeholder zero-length command in its place, then aligns
+		 * the real command to the next max packet size.
+		 *
+		 * Handle such cases, or we're going to miss a significant
+		 * number of events in case of a heavy rx load on the bus.
+		 */
+		if (msg->len == 0) {
+			pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
+			continue;
+		}
 
 		if (pos + msg->len > urb->actual_length) {
 			dev_err(dev->udev->dev.parent, "Format error\n");
@@ -1326,7 +1339,6 @@
 		}
 
 		kvaser_usb_handle_message(dev, msg);
-
 		pos += msg->len;
 	}
 
@@ -1498,6 +1510,24 @@
 	return err;
 }
 
+static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
+{
+	int i;
+
+	priv->active_tx_contexts = 0;
+	for (i = 0; i < MAX_TX_URBS; i++)
+		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+}
+
+/* This method might sleep. Do not call it in the atomic context
+ * of URB completions.
+ */
+static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+{
+	usb_kill_anchored_urbs(&priv->tx_submitted);
+	kvaser_usb_reset_tx_urb_contexts(priv);
+}
+
 static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
 {
 	int i;
@@ -1615,9 +1645,9 @@
 	struct urb *urb;
 	void *buf;
 	struct kvaser_msg *msg;
-	int i, err;
-	int ret = NETDEV_TX_OK;
+	int i, err, ret = NETDEV_TX_OK;
 	u8 *msg_tx_can_flags = NULL;		/* GCC */
+	unsigned long flags;
 
 	if (can_dropped_invalid_skb(netdev, skb))
 		return NETDEV_TX_OK;
@@ -1634,7 +1664,7 @@
 	if (!buf) {
 		stats->tx_dropped++;
 		dev_kfree_skb(skb);
-		goto nobufmem;
+		goto freeurb;
 	}
 
 	msg = buf;
@@ -1671,22 +1701,32 @@
 	if (cf->can_id & CAN_RTR_FLAG)
 		*msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
 
+	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
 		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
 			context = &priv->tx_contexts[i];
+
+			context->echo_index = i;
+			can_put_echo_skb(skb, netdev, context->echo_index);
+			++priv->active_tx_contexts;
+			if (priv->active_tx_contexts >= MAX_TX_URBS)
+				netif_stop_queue(netdev);
+
 			break;
 		}
 	}
+	spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 
 	/* This should never happen; it implies a flow control bug */
 	if (!context) {
 		netdev_warn(netdev, "cannot find free context\n");
+
+		kfree(buf);
 		ret =  NETDEV_TX_BUSY;
-		goto releasebuf;
+		goto freeurb;
 	}
 
 	context->priv = priv;
-	context->echo_index = i;
 	context->dlc = cf->can_dlc;
 
 	msg->u.tx_can.tid = context->echo_index;
@@ -1698,18 +1738,17 @@
 			  kvaser_usb_write_bulk_callback, context);
 	usb_anchor_urb(urb, &priv->tx_submitted);
 
-	can_put_echo_skb(skb, netdev, context->echo_index);
-
-	atomic_inc(&priv->active_tx_urbs);
-
-	if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
-		netif_stop_queue(netdev);
-
 	err = usb_submit_urb(urb, GFP_ATOMIC);
 	if (unlikely(err)) {
-		can_free_echo_skb(netdev, context->echo_index);
+		spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
-		atomic_dec(&priv->active_tx_urbs);
+		can_free_echo_skb(netdev, context->echo_index);
+		context->echo_index = MAX_TX_URBS;
+		--priv->active_tx_contexts;
+		netif_wake_queue(netdev);
+
+		spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+
 		usb_unanchor_urb(urb);
 
 		stats->tx_dropped++;
@@ -1719,16 +1758,12 @@
 		else
 			netdev_warn(netdev, "Failed tx_urb %d\n", err);
 
-		goto releasebuf;
+		goto freeurb;
 	}
 
-	usb_free_urb(urb);
+	ret = NETDEV_TX_OK;
 
-	return NETDEV_TX_OK;
-
-releasebuf:
-	kfree(buf);
-nobufmem:
+freeurb:
 	usb_free_urb(urb);
 	return ret;
 }
@@ -1840,7 +1875,7 @@
 	struct kvaser_usb *dev = usb_get_intfdata(intf);
 	struct net_device *netdev;
 	struct kvaser_usb_net_priv *priv;
-	int i, err;
+	int err;
 
 	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
 	if (err)
@@ -1854,19 +1889,17 @@
 
 	priv = netdev_priv(netdev);
 
+	init_usb_anchor(&priv->tx_submitted);
 	init_completion(&priv->start_comp);
 	init_completion(&priv->stop_comp);
 
-	init_usb_anchor(&priv->tx_submitted);
-	atomic_set(&priv->active_tx_urbs, 0);
-
-	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
-		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
-
 	priv->dev = dev;
 	priv->netdev = netdev;
 	priv->channel = channel;
 
+	spin_lock_init(&priv->tx_contexts_lock);
+	kvaser_usb_reset_tx_urb_contexts(priv);
+
 	priv->can.state = CAN_STATE_STOPPED;
 	priv->can.clock.freq = CAN_USB_CLOCK;
 	priv->can.bittiming_const = &kvaser_usb_bittiming_const;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 962c3f0..0bac0f1 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -879,6 +879,10 @@
 
 		pdev->usb_if = ppdev->usb_if;
 		pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr;
+
+		/* copy the ctrlmode[_supported] settings too */
+		dev->can.ctrlmode = ppdev->dev.can.ctrlmode;
+		dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported;
 	}
 
 	pdev->usb_if->dev[dev->ctrl_idx] = dev;
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index ee9f650..7b7053d 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -105,8 +105,8 @@
 {									\
 	u32 indir, dir;							\
 	spin_lock(&priv->indir_lock);					\
-	indir = reg_readl(priv, REG_DIR_DATA_READ);			\
 	dir = __raw_readl(priv->name + off);				\
+	indir = reg_readl(priv, REG_DIR_DATA_READ);			\
 	spin_unlock(&priv->indir_lock);					\
 	return (u64)indir << 32 | dir;					\
 }									\
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05..ec6eac1 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@
     link->open++;
 
     info->link_status = 0x00;
-    init_timer(&info->watchdog);
-    info->watchdog.function = ei_watchdog;
-    info->watchdog.data = (u_long)dev;
-    info->watchdog.expires = jiffies + HZ;
-    add_timer(&info->watchdog);
+    setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+    mod_timer(&info->watchdog, jiffies + HZ);
 
     return ax_open(dev);
 } /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d..2777289 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@
 
     info->phy_id = info->eth_phy;
     info->link_status = 0x00;
-    init_timer(&info->watchdog);
-    info->watchdog.function = ei_watchdog;
-    info->watchdog.data = (u_long)dev;
-    info->watchdog.expires = jiffies + HZ;
-    add_timer(&info->watchdog);
+    setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+    mod_timer(&info->watchdog, jiffies + HZ);
 
     return ei_open(dev);
 } /* pcnet_open */
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 760c72c..6725dc0 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@
 	u16 pktlength;
 	u16 pktstatus;
 
-	while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+	while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+	       (count < limit))  {
 		pktstatus = rxstatus >> 16;
 		pktlength = rxstatus & 0xffff;
 
@@ -491,28 +492,27 @@
 	struct altera_tse_private *priv =
 			container_of(napi, struct altera_tse_private, napi);
 	int rxcomplete = 0;
-	int txcomplete = 0;
 	unsigned long int flags;
 
-	txcomplete = tse_tx_complete(priv);
+	tse_tx_complete(priv);
 
 	rxcomplete = tse_rx(priv, budget);
 
-	if (rxcomplete >= budget || txcomplete > 0)
-		return rxcomplete;
+	if (rxcomplete < budget) {
 
-	napi_gro_flush(napi, false);
-	__napi_complete(napi);
+		napi_gro_flush(napi, false);
+		__napi_complete(napi);
 
-	netdev_dbg(priv->dev,
-		   "NAPI Complete, did %d packets with budget %d\n",
-		   txcomplete+rxcomplete, budget);
+		netdev_dbg(priv->dev,
+			   "NAPI Complete, did %d packets with budget %d\n",
+			   rxcomplete, budget);
 
-	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
-	priv->dmaops->enable_rxirq(priv);
-	priv->dmaops->enable_txirq(priv);
-	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
-	return rxcomplete + txcomplete;
+		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+		priv->dmaops->enable_rxirq(priv);
+		priv->dmaops->enable_txirq(priv);
+		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+	}
+	return rxcomplete;
 }
 
 /* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@
 {
 	struct net_device *dev = dev_id;
 	struct altera_tse_private *priv;
-	unsigned long int flags;
 
 	if (unlikely(!dev)) {
 		pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@
 	}
 	priv = netdev_priv(dev);
 
-	/* turn off desc irqs and enable napi rx */
-	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
-
-	if (likely(napi_schedule_prep(&priv->napi))) {
-		priv->dmaops->disable_rxirq(priv);
-		priv->dmaops->disable_txirq(priv);
-		__napi_schedule(&priv->napi);
-	}
-
+	spin_lock(&priv->rxdma_irq_lock);
 	/* reset IRQs */
 	priv->dmaops->clear_rxirq(priv);
 	priv->dmaops->clear_txirq(priv);
+	spin_unlock(&priv->rxdma_irq_lock);
 
-	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+	if (likely(napi_schedule_prep(&priv->napi))) {
+		spin_lock(&priv->rxdma_irq_lock);
+		priv->dmaops->disable_rxirq(priv);
+		priv->dmaops->disable_txirq(priv);
+		spin_unlock(&priv->rxdma_irq_lock);
+		__napi_schedule(&priv->napi);
+	}
+
 
 	return IRQ_HANDLED;
 }
@@ -1399,7 +1398,7 @@
 	}
 
 	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
-				 &priv->rx_fifo_depth)) {
+				 &priv->tx_fifo_depth)) {
 		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
 		ret = -ENXIO;
 		goto err_free_netdev;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d440..885b02b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@
 	}
 }
 
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	struct net_device *netdev = pdata->netdev;
+	unsigned int i;
+	int ret;
+
+	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+			       netdev->name, pdata);
+	if (ret) {
+		netdev_alert(netdev, "error requesting irq %d\n",
+			     pdata->dev_irq);
+		return ret;
+	}
+
+	if (!pdata->per_channel_irq)
+		return 0;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		snprintf(channel->dma_irq_name,
+			 sizeof(channel->dma_irq_name) - 1,
+			 "%s-TxRx-%u", netdev_name(netdev),
+			 channel->queue_index);
+
+		ret = devm_request_irq(pdata->dev, channel->dma_irq,
+				       xgbe_dma_isr, 0,
+				       channel->dma_irq_name, channel);
+		if (ret) {
+			netdev_alert(netdev, "error requesting irq %d\n",
+				     channel->dma_irq);
+			goto err_irq;
+		}
+	}
+
+	return 0;
+
+err_irq:
+	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+	for (i--, channel--; i < pdata->channel_count; i--, channel--)
+		devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+	return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+	if (!pdata->per_channel_irq)
+		return;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++)
+		devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@
 		return -EINVAL;
 	}
 
-	phy_stop(pdata->phydev);
-
 	spin_lock_irqsave(&pdata->lock, flags);
 
 	if (caller == XGMAC_DRIVER_CONTEXT)
 		netif_device_detach(netdev);
 
 	netif_tx_stop_all_queues(netdev);
-	xgbe_napi_disable(pdata, 0);
 
-	/* Powerdown Tx/Rx */
 	hw_if->powerdown_tx(pdata);
 	hw_if->powerdown_rx(pdata);
 
+	xgbe_napi_disable(pdata, 0);
+
+	phy_stop(pdata->phydev);
+
 	pdata->power_down = 1;
 
 	spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@
 
 	phy_start(pdata->phydev);
 
-	/* Enable Tx/Rx */
+	xgbe_napi_enable(pdata, 0);
+
 	hw_if->powerup_tx(pdata);
 	hw_if->powerup_rx(pdata);
 
 	if (caller == XGMAC_DRIVER_CONTEXT)
 		netif_device_attach(netdev);
 
-	xgbe_napi_enable(pdata, 0);
 	netif_tx_start_all_queues(netdev);
 
 	spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct net_device *netdev = pdata->netdev;
+	int ret;
 
 	DBGPR("-->xgbe_start\n");
 
@@ -884,17 +947,31 @@
 
 	phy_start(pdata->phydev);
 
+	xgbe_napi_enable(pdata, 1);
+
+	ret = xgbe_request_irqs(pdata);
+	if (ret)
+		goto err_napi;
+
 	hw_if->enable_tx(pdata);
 	hw_if->enable_rx(pdata);
 
 	xgbe_init_tx_timers(pdata);
 
-	xgbe_napi_enable(pdata, 1);
 	netif_tx_start_all_queues(netdev);
 
 	DBGPR("<--xgbe_start\n");
 
 	return 0;
+
+err_napi:
+	xgbe_napi_disable(pdata, 1);
+
+	phy_stop(pdata->phydev);
+
+	hw_if->exit(pdata);
+
+	return ret;
 }
 
 static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@
 
 	DBGPR("-->xgbe_stop\n");
 
-	phy_stop(pdata->phydev);
-
 	netif_tx_stop_all_queues(netdev);
-	xgbe_napi_disable(pdata, 1);
 
 	xgbe_stop_tx_timers(pdata);
 
 	hw_if->disable_tx(pdata);
 	hw_if->disable_rx(pdata);
 
+	xgbe_free_irqs(pdata);
+
+	xgbe_napi_disable(pdata, 1);
+
+	phy_stop(pdata->phydev);
+
+	hw_if->exit(pdata);
+
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
 		if (!channel->tx_ring)
@@ -931,10 +1013,6 @@
 
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 {
-	struct xgbe_channel *channel;
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned int i;
-
 	DBGPR("-->xgbe_restart_dev\n");
 
 	/* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@
 		return;
 
 	xgbe_stop(pdata);
-	synchronize_irq(pdata->dev_irq);
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++)
-			synchronize_irq(channel->dma_irq);
-	}
 
 	xgbe_free_tx_data(pdata);
 	xgbe_free_rx_data(pdata);
 
-	/* Issue software reset to device */
-	hw_if->exit(pdata);
-
 	xgbe_start(pdata);
 
 	DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@
 static int xgbe_open(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
-	struct xgbe_channel *channel = NULL;
-	unsigned int i = 0;
 	int ret;
 
 	DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
-	/* Request interrupts */
-	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
-			       netdev->name, pdata);
-	if (ret) {
-		netdev_alert(netdev, "error requesting irq %d\n",
-			     pdata->dev_irq);
-		goto err_rings;
-	}
-
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++) {
-			snprintf(channel->dma_irq_name,
-				 sizeof(channel->dma_irq_name) - 1,
-				 "%s-TxRx-%u", netdev_name(netdev),
-				 channel->queue_index);
-
-			ret = devm_request_irq(pdata->dev, channel->dma_irq,
-					       xgbe_dma_isr, 0,
-					       channel->dma_irq_name, channel);
-			if (ret) {
-				netdev_alert(netdev,
-					     "error requesting irq %d\n",
-					     channel->dma_irq);
-				goto err_irq;
-			}
-		}
-	}
-
 	ret = xgbe_start(pdata);
 	if (ret)
-		goto err_start;
+		goto err_rings;
 
 	DBGPR("<--xgbe_open\n");
 
 	return 0;
 
-err_start:
-	hw_if->exit(pdata);
-
-err_irq:
-	if (pdata->per_channel_irq) {
-		/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
-		for (i--, channel--; i < pdata->channel_count; i--, channel--)
-			devm_free_irq(pdata->dev, channel->dma_irq, channel);
-	}
-
-	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
 err_rings:
 	desc_if->free_ring_resources(pdata);
 
@@ -1399,30 +1424,16 @@
 static int xgbe_close(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
-	struct xgbe_channel *channel;
-	unsigned int i;
 
 	DBGPR("-->xgbe_close\n");
 
 	/* Stop the device */
 	xgbe_stop(pdata);
 
-	/* Issue software reset to device */
-	hw_if->exit(pdata);
-
 	/* Free the ring descriptors and buffers */
 	desc_if->free_ring_resources(pdata);
 
-	/* Release the interrupts */
-	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++)
-			devm_free_irq(pdata->dev, channel->dma_irq, channel);
-	}
-
 	/* Free the channel and ring structures */
 	xgbe_free_channels(pdata);
 
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 869d97f..b927021 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,7 +593,7 @@
 	if (!xgene_ring_mgr_init(pdata))
 		return -ENODEV;
 
-	if (!efi_enabled(EFI_BOOT)) {
+	if (pdata->clk) {
 		clk_prepare_enable(pdata->clk);
 		clk_disable_unprepare(pdata->clk);
 		clk_prepare_enable(pdata->clk);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4de62b2..635a83b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1025,6 +1025,8 @@
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id xgene_enet_acpi_match[] = {
 	{ "APMC0D05", },
+	{ "APMC0D30", },
+	{ "APMC0D31", },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
@@ -1033,6 +1035,8 @@
 #ifdef CONFIG_OF
 static struct of_device_id xgene_enet_of_match[] = {
 	{.compatible = "apm,xgene-enet",},
+	{.compatible = "apm,xgene1-sgenet",},
+	{.compatible = "apm,xgene1-xgenet",},
 	{},
 };
 
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 21206d3..a7f2cc3 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -486,7 +486,7 @@
 {
 	struct bcm_enet_priv *priv;
 	struct net_device *dev;
-	int tx_work_done, rx_work_done;
+	int rx_work_done;
 
 	priv = container_of(napi, struct bcm_enet_priv, napi);
 	dev = priv->net_dev;
@@ -498,14 +498,14 @@
 			 ENETDMAC_IR, priv->tx_chan);
 
 	/* reclaim sent skb */
-	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
+	bcm_enet_tx_reclaim(dev, 0);
 
 	spin_lock(&priv->rx_lock);
 	rx_work_done = bcm_enet_receive_queue(dev, budget);
 	spin_unlock(&priv->rx_lock);
 
-	if (rx_work_done >= budget || tx_work_done > 0) {
-		/* rx/tx queue is not yet empty/clean */
+	if (rx_work_done >= budget) {
+		/* rx queue is not yet empty/clean */
 		return rx_work_done;
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5b308a4..783543a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,9 +274,9 @@
 	/* RBUF misc statistics */
 	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
 	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
-	STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-	STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-	STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@
 		s = &bcm_sysport_gstrings_stats[i];
 		switch (s->type) {
 		case BCM_SYSPORT_STAT_NETDEV:
+		case BCM_SYSPORT_STAT_SOFT:
 			continue;
 		case BCM_SYSPORT_STAT_MIB_RX:
 		case BCM_SYSPORT_STAT_MIB_TX:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index fc19417..7e3d87a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -570,6 +570,7 @@
 	BCM_SYSPORT_STAT_RUNT,
 	BCM_SYSPORT_STAT_RXCHK,
 	BCM_SYSPORT_STAT_RBUF,
+	BCM_SYSPORT_STAT_SOFT,
 };
 
 /* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@
 #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
 #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
 #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
 
 #define STAT_RXCHK(str, m, ofs) { \
 	.stat_string = str, \
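
The new BCM_SYSPORT_STAT_SOFT type marks counters that exist only in software, so the ethtool MIB refresh skips them instead of overwriting them with hardware reads. A minimal userspace sketch of that skip, with purely illustrative names and values:

#include <stdio.h>

enum stat_type { STAT_HW, STAT_SOFT };

struct stat_desc {
	const char *name;
	enum stat_type type;
};

static const struct stat_desc stats[] = {
	{ "rx_packets",           STAT_HW },
	{ "alloc_rx_buff_failed", STAT_SOFT },
	{ "tx_dma_failed",        STAT_SOFT },
};

int main(void)
{
	/* MIB refresh: only hardware-backed counters are re-read; the
	 * software-only ones keep whatever the driver accumulated. */
	for (unsigned int i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		if (stats[i].type == STAT_SOFT)
			continue;
		printf("would read %s from hardware\n", stats[i].name);
	}
	return 0;
}
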
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 676ffe0..0469f72 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -302,9 +302,6 @@
 	slot->skb = skb;
 	slot->dma_addr = dma_addr;
 
-	if (slot->dma_addr & 0xC0000000)
-		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
-
 	return 0;
 }
 
@@ -505,8 +502,6 @@
 				  ring->mmio_base);
 			goto err_dma_free;
 		}
-		if (ring->dma_base & 0xC0000000)
-			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
 		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
 						      BGMAC_DMA_RING_TX);
@@ -536,8 +531,6 @@
 			err = -ENOMEM;
 			goto err_dma_free;
 		}
-		if (ring->dma_base & 0xC0000000)
-			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
 		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
 						      BGMAC_DMA_RING_RX);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7155e1d..996e215 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12722,6 +12722,9 @@
 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 			       PCICFG_VENDOR_ID_OFFSET);
 
+	/* Set PCIe reset type to fundamental for EEH recovery */
+	pdev->needs_freset = 1;
+
 	/* AER (Advanced Error reporting) configuration */
 	rc = pci_enable_pcie_error_reporting(pdev);
 	if (!rc)
@@ -12766,7 +12769,7 @@
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
 		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
 		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
-	if (!CHIP_IS_E1x(bp)) {
+	if (!chip_is_e1x) {
 		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
 				    NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
 		dev->hw_enc_features =
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ff83c46b..6befde6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@
 	BCMGENET_STAT_MIB_TX,
 	BCMGENET_STAT_RUNT,
 	BCMGENET_STAT_MISC,
+	BCMGENET_STAT_SOFT,
 };
 
 struct bcmgenet_stats {
@@ -515,6 +516,7 @@
 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
 #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
 #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
 
 #define STAT_GENET_MISC(str, m, offset) { \
 	.stat_string = str, \
@@ -614,9 +616,9 @@
 			UMAC_RBUF_OVFL_CNT),
 	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
-	STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-	STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-	STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@
 		s = &bcmgenet_gstrings_stats[i];
 		switch (s->type) {
 		case BCMGENET_STAT_NETDEV:
+		case BCMGENET_STAT_SOFT:
 			continue;
 		case BCMGENET_STAT_MIB_RX:
 		case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@
 }
 
 /* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
-				  struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	int last_tx_cn, last_c_index, num_tx_bds;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
+	unsigned int pkts_compl = 0;
 	unsigned int bds_compl;
 	unsigned int c_index;
 
@@ -1005,6 +1009,7 @@
 		tx_cb_ptr = ring->cbs + last_c_index;
 		bds_compl = 0;
 		if (tx_cb_ptr->skb) {
+			pkts_compl++;
 			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
 			dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@
 		last_c_index &= (num_tx_bds - 1);
 	}
 
-	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
-		ring->int_disable(priv, ring);
-
-	if (netif_tx_queue_stopped(txq))
-		netif_tx_wake_queue(txq);
+	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+		if (netif_tx_queue_stopped(txq))
+			netif_tx_wake_queue(txq);
+	}
 
 	ring->c_index = c_index;
+
+	return pkts_compl;
 }
 
-static void bcmgenet_tx_reclaim(struct net_device *dev,
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
 				struct bcmgenet_tx_ring *ring)
 {
+	unsigned int released;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ring->lock, flags);
-	__bcmgenet_tx_reclaim(dev, ring);
+	released = __bcmgenet_tx_reclaim(dev, ring);
 	spin_unlock_irqrestore(&ring->lock, flags);
+
+	return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct bcmgenet_tx_ring *ring =
+		container_of(napi, struct bcmgenet_tx_ring, napi);
+	unsigned int work_done = 0;
+
+	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+	if (work_done == 0) {
+		napi_complete(napi);
+		ring->int_enable(ring->priv, ring);
+
+		return 0;
+	}
+
+	return budget;
 }
 
 static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@
 	bcmgenet_tdma_ring_writel(priv, ring->index,
 				  ring->prod_index, TDMA_PROD_INDEX);
 
-	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
 		netif_tx_stop_queue(txq);
-		ring->int_enable(priv, ring);
-	}
 
 out:
 	spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@
 	struct device *kdev = &priv->pdev->dev;
 	int ret;
 	u32 reg, cpu_mask_clear;
+	int index;
 
 	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
 
@@ -1647,7 +1673,7 @@
 
 	bcmgenet_intr_disable(priv);
 
-	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
 
 	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
 
@@ -1674,6 +1700,10 @@
 
 	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
 
+	for (index = 0; index < priv->hw_params->tx_queues; index++)
+		bcmgenet_intrl2_1_writel(priv, (1 << index),
+					 INTRL2_CPU_MASK_CLEAR);
+
 	/* Enable rx/tx engine.*/
 	dev_dbg(kdev, "done init umac\n");
 
@@ -1693,6 +1723,8 @@
 	unsigned int first_bd;
 
 	spin_lock_init(&ring->lock);
+	ring->priv = priv;
+	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
 	ring->index = index;
 	if (index == DESC_INDEX) {
 		ring->queue = 0;
@@ -1738,6 +1770,17 @@
 				  TDMA_WRITE_PTR);
 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
 				  DMA_END_ADDR);
+
+	napi_enable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
+				  unsigned int index)
+{
+	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+
+	napi_disable(&ring->napi);
+	netif_napi_del(&ring->napi);
 }
 
 /* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@
 	return ret;
 }
 
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
 	int i;
 
@@ -1926,6 +1969,18 @@
 	kfree(priv->tx_cbs);
 }
 
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+	int i;
+
+	bcmgenet_fini_tx_ring(priv, DESC_INDEX);
+
+	for (i = 0; i < priv->hw_params->tx_queues; i++)
+		bcmgenet_fini_tx_ring(priv, i);
+
+	__bcmgenet_fini_dma(priv);
+}
+
 /* init_edma: Initialize DMA control register */
 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 {
@@ -1952,7 +2007,7 @@
 	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
 			       GFP_KERNEL);
 	if (!priv->tx_cbs) {
-		bcmgenet_fini_dma(priv);
+		__bcmgenet_fini_dma(priv);
 		return -ENOMEM;
 	}
 
@@ -1975,9 +2030,6 @@
 			struct bcmgenet_priv, napi);
 	unsigned int work_done;
 
-	/* tx reclaim */
-	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
 	work_done = bcmgenet_desc_rx(priv, budget);
 
 	/* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@
 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 {
 	struct bcmgenet_priv *priv = dev_id;
+	struct bcmgenet_tx_ring *ring;
 	unsigned int index;
 
 	/* Save irq status for bottom-half processing. */
 	priv->irq1_stat =
 		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
-		~priv->int1_mask;
+		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
 	/* clear interrupts */
 	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
 		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
 	/* Check the MBDONE interrupts.
 	 * packet is done, reclaim descriptors
 	 */
-	if (priv->irq1_stat & 0x0000ffff) {
-		index = 0;
-		for (index = 0; index < 16; index++) {
-			if (priv->irq1_stat & (1 << index))
-				bcmgenet_tx_reclaim(priv->dev,
-						    &priv->tx_rings[index]);
+	for (index = 0; index < priv->hw_params->tx_queues; index++) {
+		if (!(priv->irq1_stat & BIT(index)))
+			continue;
+
+		ring = &priv->tx_rings[index];
+
+		if (likely(napi_schedule_prep(&ring->napi))) {
+			ring->int_disable(priv, ring);
+			__napi_schedule(&ring->napi);
 		}
 	}
+
 	return IRQ_HANDLED;
 }
 
@@ -2075,8 +2133,12 @@
 	}
 	if (priv->irq0_stat &
 			(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
-		/* Tx reclaim */
-		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+		struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+		if (likely(napi_schedule_prep(&ring->napi))) {
+			ring->int_disable(priv, ring);
+			__napi_schedule(&ring->napi);
+		}
 	}
 	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
 				UMAC_IRQ_PHY_DET_F |
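
The GENET change above moves TX reclaim out of the hard-IRQ path into a per-ring NAPI poll: the ISR masks the ring interrupt and schedules NAPI, and the poll only completes and re-enables the interrupt once a pass reclaims nothing. A toy, self-contained model of that poll contract (names and batch sizes are made up, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct toy_ring {
	int pending;		/* descriptors waiting to be reclaimed */
	bool irq_enabled;
};

static int toy_reclaim(struct toy_ring *r)
{
	int done = r->pending > 4 ? 4 : r->pending;	/* pretend batch size */

	r->pending -= done;
	return done;
}

static int toy_tx_poll(struct toy_ring *r, int budget)
{
	if (toy_reclaim(r) == 0) {
		r->irq_enabled = true;	/* stands in for napi_complete() + int_enable() */
		return 0;
	}
	return budget;			/* still work left: stay in polling mode */
}

int main(void)
{
	struct toy_ring r = { .pending = 10, .irq_enabled = false };
	int rc;

	do {
		rc = toy_tx_poll(&r, 64);
		printf("poll returned %d, pending now %d\n", rc, r.pending);
	} while (rc != 0);
	printf("irq re-enabled: %d\n", r.irq_enabled);
	return 0;
}
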
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b36ddec..0d370d1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -520,6 +520,7 @@
 
 struct bcmgenet_tx_ring {
 	spinlock_t	lock;		/* ring lock */
+	struct napi_struct napi;	/* NAPI per tx queue */
 	unsigned int	index;		/* ring index */
 	unsigned int	queue;		/* queue index */
 	struct enet_cb	*cbs;		/* tx ring buffer control block*/
@@ -534,6 +535,7 @@
 			   struct bcmgenet_tx_ring *);
 	void (*int_disable)(struct bcmgenet_priv *priv,
 			    struct bcmgenet_tx_ring *);
+	struct bcmgenet_priv *priv;
 };
 
 /* device context */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 149a0d7..b971229 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -73,15 +73,17 @@
 	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
 		return -EINVAL;
 
+	reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
 	if (wol->wolopts & WAKE_MAGICSECURE) {
 		bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
 				     UMAC_MPD_PW_MS);
 		bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
 				     UMAC_MPD_PW_LS);
-		reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
 		reg |= MPD_PW_EN;
-		bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+	} else {
+		reg &= ~MPD_PW_EN;
 	}
+	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
 
 	/* Flag the device and relevant IRQ as wakeup capable */
 	if (wol->wolopts) {
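
The WoL fix above turns the MPD_PW_EN update into a full read-modify-write, so a previously latched password-enable bit is cleared when plain magic-packet wake is requested. A minimal sketch of that read-modify-write, assuming an illustrative bit position:

#include <stdint.h>
#include <stdio.h>

#define MPD_PW_EN	(1u << 1)	/* illustrative bit position only */

static uint32_t set_secure_wol(uint32_t reg, int secure)
{
	if (secure)
		reg |= MPD_PW_EN;
	else
		reg &= ~MPD_PW_EN;	/* the old code never cleared this bit */
	return reg;
}

int main(void)
{
	uint32_t reg = MPD_PW_EN;	/* pretend a stale enable is latched */

	reg = set_secure_wol(reg, 0);
	printf("MPD_PW_EN is now %s\n", (reg & MPD_PW_EN) ? "set" : "clear");
	return 0;
}
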
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ad76b8e..81d4153 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2113,17 +2113,17 @@
 };
 
 #if defined(CONFIG_OF)
-static struct macb_config pc302gem_config = {
+static const struct macb_config pc302gem_config = {
 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
 	.dma_burst_length = 16,
 };
 
-static struct macb_config sama5d3_config = {
+static const struct macb_config sama5d3_config = {
 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
 	.dma_burst_length = 16,
 };
 
-static struct macb_config sama5d4_config = {
+static const struct macb_config sama5d4_config = {
 	.caps = 0,
 	.dma_burst_length = 4,
 };
@@ -2154,7 +2154,7 @@
 	if (bp->pdev->dev.of_node) {
 		match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
 		if (match && match->data) {
-			config = (const struct macb_config *)match->data;
+			config = match->data;
 
 			bp->caps = config->caps;
 			/*
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 31dc080..ff85619 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -351,7 +351,7 @@
 
 /* Bitfields in MID */
 #define MACB_IDNUM_OFFSET			16
-#define MACB_IDNUM_SIZE				16
+#define MACB_IDNUM_SIZE				12
 #define MACB_REV_OFFSET				0
 #define MACB_REV_SIZE				16
 
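
Narrowing MACB_IDNUM_SIZE to 12 bits keeps unrelated high bits of the MID register out of the extracted ID. A small standalone sketch of the field extraction using a made-up register value (the driver's own bitfield helpers implement the same idea):

#include <stdint.h>
#include <stdio.h>

#define IDNUM_OFFSET	16
#define IDNUM_SIZE	12	/* the patch narrows this from 16 */

static uint32_t field_get(uint32_t reg, int offset, int size)
{
	return (reg >> offset) & ((1u << size) - 1);
}

int main(void)
{
	uint32_t mid = 0xabcd0123;	/* made-up MID register value */

	printf("12-bit id: 0x%x\n", (unsigned int)field_get(mid, IDNUM_OFFSET, IDNUM_SIZE));
	printf("16-bit id: 0x%x\n", (unsigned int)field_get(mid, IDNUM_OFFSET, 16));
	return 0;
}
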
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 9062a84..c308429 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -35,10 +35,10 @@
 }
 
 static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
-				   int addr_len)
+				   u8 v6)
 {
-	return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
-				ipv6_clip_hash(ctbl, addr);
+	return v6 ? ipv6_clip_hash(ctbl, addr) :
+			ipv4_clip_hash(ctbl, addr);
 }
 
 static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@
 	struct clip_entry *ce, *cte;
 	u32 *addr = (u32 *)lip;
 	int hash;
-	int addr_len;
-	int ret = 0;
+	int ret = -1;
 
 	if (!ctbl)
 		return 0;
 
-	if (v6)
-		addr_len = 16;
-	else
-		addr_len = 4;
-
-	hash = clip_addr_hash(ctbl, addr, addr_len);
+	hash = clip_addr_hash(ctbl, addr, v6);
 
 	read_lock_bh(&ctbl->lock);
 	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-		if (addr_len == cte->addr_len &&
-		    memcmp(lip, cte->addr, cte->addr_len) == 0) {
+		if (cte->addr6.sin6_family == AF_INET6 && v6)
+			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+				     sizeof(struct in6_addr));
+		else if (cte->addr.sin_family == AF_INET && !v6)
+			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+				     sizeof(struct in_addr));
+		if (!ret) {
 			ce = cte;
 			read_unlock_bh(&ctbl->lock);
 			goto found;
@@ -111,15 +110,20 @@
 		spin_lock_init(&ce->lock);
 		atomic_set(&ce->refcnt, 0);
 		atomic_dec(&ctbl->nfree);
-		ce->addr_len = addr_len;
-		memcpy(ce->addr, lip, addr_len);
 		list_add_tail(&ce->list, &ctbl->hash_list[hash]);
 		if (v6) {
+			ce->addr6.sin6_family = AF_INET6;
+			memcpy(ce->addr6.sin6_addr.s6_addr,
+			       lip, sizeof(struct in6_addr));
 			ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
 			if (ret) {
 				write_unlock_bh(&ctbl->lock);
 				return ret;
 			}
+		} else {
+			ce->addr.sin_family = AF_INET;
+			memcpy((char *)(&ce->addr.sin_addr), lip,
+			       sizeof(struct in_addr));
 		}
 	} else {
 		write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@
 	struct clip_entry *ce, *cte;
 	u32 *addr = (u32 *)lip;
 	int hash;
-	int addr_len;
+	int ret = -1;
 
-	if (v6)
-		addr_len = 16;
-	else
-		addr_len = 4;
-
-	hash = clip_addr_hash(ctbl, addr, addr_len);
+	hash = clip_addr_hash(ctbl, addr, v6);
 
 	read_lock_bh(&ctbl->lock);
 	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-		if (addr_len == cte->addr_len &&
-		    memcmp(lip, cte->addr, cte->addr_len) == 0) {
+		if (cte->addr6.sin6_family == AF_INET6 && v6)
+			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+				     sizeof(struct in6_addr));
+		else if (cte->addr.sin_family == AF_INET && !v6)
+			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+				     sizeof(struct in_addr));
+		if (!ret) {
 			ce = cte;
 			read_unlock_bh(&ctbl->lock);
 			goto found;
@@ -249,10 +253,7 @@
 	for (i = 0 ; i < ctbl->clipt_size;  ++i) {
 		list_for_each_entry(ce, &ctbl->hash_list[i], list) {
 			ip[0] = '\0';
-			if (ce->addr_len == 16)
-				sprintf(ip, "%pI6c", ce->addr);
-			else
-				sprintf(ip, "%pI4c", ce->addr);
+			sprintf(ip, "%pISc", &ce->addr);
 			seq_printf(seq, "%-25s   %u\n", ip,
 				   atomic_read(&ce->refcnt));
 		}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 2eaba01..35eb43c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -14,8 +14,10 @@
 	spinlock_t lock;	/* Hold while modifying clip reference */
 	atomic_t refcnt;
 	struct list_head list;
-	u32 addr[4];
-	int addr_len;
+	union {
+		struct sockaddr_in addr;
+		struct sockaddr_in6 addr6;
+	};
 };
 
 struct clip_tbl {
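
With the union above, a CLIP entry stores either an IPv4 or an IPv6 address and lookups discriminate on the address family before comparing the right member. A minimal userspace model of that lookup test, built on the standard sockaddr types (the structure and names here are illustrative):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

struct clip_like_entry {
	union {			/* C11 anonymous union, as in the header change */
		struct sockaddr_in  addr;
		struct sockaddr_in6 addr6;
	};
};

static int entry_matches(const struct clip_like_entry *e, const void *lip, int v6)
{
	if (v6 && e->addr6.sin6_family == AF_INET6)
		return !memcmp(lip, &e->addr6.sin6_addr, sizeof(struct in6_addr));
	if (!v6 && e->addr.sin_family == AF_INET)
		return !memcmp(lip, &e->addr.sin_addr, sizeof(struct in_addr));
	return 0;
}

int main(void)
{
	struct clip_like_entry e;
	struct in_addr ip4;

	memset(&e, 0, sizeof(e));
	inet_pton(AF_INET, "192.0.2.1", &ip4);
	e.addr.sin_family = AF_INET;
	e.addr.sin_addr = ip4;

	printf("ipv4 match: %d\n", entry_matches(&e, &ip4, 0));
	printf("ipv6 match: %d\n", entry_matches(&e, &ip4, 1));
	return 0;
}
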
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17..97842d0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1103,7 +1103,7 @@
 #define T4_MEMORY_WRITE	0
 #define T4_MEMORY_READ	1
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
-		 __be32 *buf, int dir);
+		 void *buf, int dir);
 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
 				  u32 len, __be32 *buf)
 {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b6..1abdfa1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@
  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
  *	@addr: address within indicated memory type
  *	@len: amount of memory to transfer
- *	@buf: host memory buffer
+ *	@hbuf: host memory buffer
  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
  *
  *	Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@
  *	caller's responsibility to perform appropriate byte order conversions.
  */
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
-		 u32 len, __be32 *buf, int dir)
+		 u32 len, void *hbuf, int dir)
 {
 	u32 pos, offset, resid, memoffset;
 	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+	u32 *buf;
 
 	/* Argument sanity checks ...
 	 */
-	if (addr & 0x3)
+	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
 		return -EINVAL;
+	buf = (u32 *)hbuf;
 
 	/* It's convenient to be able to handle lengths which aren't a
 	 * multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@
 
 	/* Transfer data to/from the adapter as long as there's an integral
 	 * number of 32-bit transfers to complete.
+	 *
+	 * A note on Endianness issues:
+	 *
+	 * The "register" reads and writes below from/to the PCI-E Memory
+	 * Window invoke the standard adapter Big-Endian to PCI-E Link
+	 * Little-Endian "swizzle."  As a result, if we have the following
+	 * data in adapter memory:
+	 *
+	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
+	 *     Address:      i+0  i+1  i+2  i+3
+	 *
+	 * Then a read of the adapter memory via the PCI-E Memory Window
+	 * will yield:
+	 *
+	 *     x = readl(i)
+	 *         31                  0
+	 *         [ b3 | b2 | b1 | b0 ]
+	 *
+	 * If this value is stored into local memory on a Little-Endian system
+	 * it will show up correctly in local memory as:
+	 *
+	 *     ( ..., b0, b1, b2, b3, ... )
+	 *
+	 * But on a Big-Endian system, the store will show up in memory
+	 * incorrectly swizzled as:
+	 *
+	 *     ( ..., b3, b2, b1, b0, ... )
+	 *
+	 * So we need to account for this in the reads and writes to the
+	 * PCI-E Memory Window below by undoing the register read/write
+	 * swizzels.
 	 */
 	while (len > 0) {
 		if (dir == T4_MEMORY_READ)
-			*buf++ = (__force __be32) t4_read_reg(adap,
-							mem_base + offset);
+			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+						mem_base + offset));
 		else
 			t4_write_reg(adap, mem_base + offset,
-				     (__force u32) *buf++);
+				     (__force u32)cpu_to_le32(*buf++));
 		offset += sizeof(__be32);
 		len -= sizeof(__be32);
 
@@ -568,15 +601,16 @@
 	 */
 	if (resid) {
 		union {
-			__be32 word;
+			u32 word;
 			char byte[4];
 		} last;
 		unsigned char *bp;
 		int i;
 
 		if (dir == T4_MEMORY_READ) {
-			last.word = (__force __be32) t4_read_reg(adap,
-							mem_base + offset);
+			last.word = le32_to_cpu(
+					(__force __le32)t4_read_reg(adap,
+						mem_base + offset));
 			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
 				bp[i] = last.byte[i];
 		} else {
@@ -584,7 +618,7 @@
 			for (i = resid; i < 4; i++)
 				last.byte[i] = 0;
 			t4_write_reg(adap, mem_base + offset,
-				     (__force u32) last.word);
+				     (__force u32)cpu_to_le32(last.word));
 		}
 	}
 
@@ -1086,7 +1120,7 @@
 		}
 
 		/* Installed successfully, update the cached header too. */
-		memcpy(card_fw, fs_fw, sizeof(*card_fw));
+		*card_fw = *fs_fw;
 		card_fw_usable = 1;
 		*reset = 0;	/* already reset as part of load_fw */
 	}
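
The t4_memory_rw() change wraps the memory-window accesses in le32_to_cpu()/cpu_to_le32() so the host buffer ends up byte-for-byte identical to adapter memory on both little- and big-endian hosts. A tiny standalone model of that correction, assuming the glibc/BSD le32toh() helper is available:

#include <endian.h>	/* le32toh(): glibc/BSD helper, assumed available */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Pretend the memory-window read returned adapter bytes
	 * b0..b3 = aa bb cc dd packed as the value 0xddccbbaa. */
	uint32_t reg_value = 0xddccbbaa;
	uint32_t fixed = le32toh(reg_value);	/* mirrors the added conversion */
	unsigned char buf[4];

	memcpy(buf, &fixed, sizeof(buf));
	/* Prints "aa bb cc dd" on both little- and big-endian hosts. */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
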
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9cbe038..a5179bf 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@
 	}
 
 	if (ENIC_TEST_INTR(pba, notify_intr)) {
-		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
 		enic_notify_check(enic);
+		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
 	}
 
 	if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@
 	struct enic *enic = data;
 	unsigned int intr = enic_msix_notify_intr(enic);
 
-	vnic_intr_return_all_credits(&enic->intr[intr]);
 	enic_notify_check(enic);
+	vnic_intr_return_all_credits(&enic->intr[intr]);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3b42556..ed41559 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -589,7 +589,7 @@
 			       (unsigned int)tp->rx_ring[i].buffer1,
 			       (unsigned int)tp->rx_ring[i].buffer2,
 			       buf[0], buf[1], buf[2]);
-			for (j = 0; buf[j] != 0xee && j < 1600; j++)
+			for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
 				if (j < 100)
 					pr_cont(" %02x", buf[j]);
 			pr_cont(" j=%d\n", j);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9bb6220..78e1ce0 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1189,13 +1189,12 @@
 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
 	struct	fec_enet_private *fep;
-	struct bufdesc *bdp, *bdp_t;
+	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
 	struct fec_enet_priv_tx_q *txq;
 	struct netdev_queue *nq;
 	int	index = 0;
-	int	i, bdnum;
 	int	entries_free;
 
 	fep = netdev_priv(ndev);
@@ -1216,29 +1215,18 @@
 		if (bdp == txq->cur_tx)
 			break;
 
-		bdp_t = bdp;
-		bdnum = 1;
-		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
-		skb = txq->tx_skbuff[index];
-		while (!skb) {
-			bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
-			index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
-			skb = txq->tx_skbuff[index];
-			bdnum++;
-		}
-		if (skb_shinfo(skb)->nr_frags &&
-		    (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
-			break;
+		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
 
-		for (i = 0; i < bdnum; i++) {
-			if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
-				dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-						 bdp->cbd_datlen, DMA_TO_DEVICE);
-			bdp->cbd_bufaddr = 0;
-			if (i < bdnum - 1)
-				bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
-		}
+		skb = txq->tx_skbuff[index];
 		txq->tx_skbuff[index] = NULL;
+		if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
+			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+					bdp->cbd_datlen, DMA_TO_DEVICE);
+		bdp->cbd_bufaddr = 0;
+		if (!skb) {
+			bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+			continue;
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1479,8 +1467,7 @@
 
 			vlan_packet_rcvd = true;
 
-			skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
-						       data, (2 * ETH_ALEN));
+			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
 			skb_pull(skb, VLAN_HLEN);
 		}
 
@@ -1597,7 +1584,7 @@
 	writel(int_events, fep->hwp + FEC_IEVENT);
 	fec_enet_collect_events(fep, int_events);
 
-	if (fep->work_tx || fep->work_rx) {
+	if ((fep->work_tx || fep->work_rx) && fep->link) {
 		ret = IRQ_HANDLED;
 
 		if (napi_schedule_prep(&fep->napi)) {
@@ -3383,7 +3370,6 @@
 		regulator_disable(fep->reg_phy);
 	if (fep->ptp_clock)
 		ptp_clock_unregister(fep->ptp_clock);
-	fec_enet_clk_enable(ndev, false);
 	of_node_put(fep->phy_node);
 	free_netdev(ndev);
 
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 43df788..7bf3682 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -747,6 +747,18 @@
 	return 0;
 }
 
+static int gfar_of_group_count(struct device_node *np)
+{
+	struct device_node *child;
+	int num = 0;
+
+	for_each_available_child_of_node(np, child)
+		if (!of_node_cmp(child->name, "queue-group"))
+			num++;
+
+	return num;
+}
+
 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 {
 	const char *model;
@@ -784,7 +796,7 @@
 		num_rx_qs = 1;
 	} else { /* MQ_MG_MODE */
 		/* get the actual number of supported groups */
-		unsigned int num_grps = of_get_available_child_count(np);
+		unsigned int num_grps = gfar_of_group_count(np);
 
 		if (num_grps == 0 || num_grps > MAXGROUPS) {
 			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
@@ -851,7 +863,10 @@
 
 	/* Parse and initialize group specific information */
 	if (priv->mode == MQ_MG_MODE) {
-		for_each_child_of_node(np, child) {
+		for_each_available_child_of_node(np, child) {
+			if (of_node_cmp(child->name, "queue-group"))
+				continue;
+
 			err = gfar_parse_group(child, priv, model);
 			if (err)
 				goto err_grp_init;
@@ -3162,8 +3177,8 @@
 	struct phy_device *phydev = priv->phydev;
 
 	if (unlikely(phydev->link != priv->oldlink ||
-		     phydev->duplex != priv->oldduplex ||
-		     phydev->speed != priv->oldspeed))
+		     (phydev->link && (phydev->duplex != priv->oldduplex ||
+				       phydev->speed != priv->oldspeed))))
 		gfar_update_link_state(priv);
 }
 
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e8a1adb..c05e507 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3262,6 +3262,139 @@
 	device_remove_file(&dev->dev, &dev_attr_remove_port);
 }
 
+static int ehea_reboot_notifier(struct notifier_block *nb,
+				unsigned long action, void *unused)
+{
+	if (action == SYS_RESTART) {
+		pr_info("Reboot: freeing all eHEA resources\n");
+		ibmebus_unregister_driver(&ehea_driver);
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+	.notifier_call = ehea_reboot_notifier,
+};
+
+static int ehea_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	int ret = NOTIFY_BAD;
+	struct memory_notify *arg = data;
+
+	mutex_lock(&dlpar_mem_lock);
+
+	switch (action) {
+	case MEM_CANCEL_OFFLINE:
+		pr_info("memory offlining canceled");
+		/* Fall through: re-add canceled memory block */
+
+	case MEM_ONLINE:
+		pr_info("memory is going online");
+		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+			goto out_unlock;
+		ehea_rereg_mrs();
+		break;
+
+	case MEM_GOING_OFFLINE:
+		pr_info("memory is going offline");
+		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+			goto out_unlock;
+		ehea_rereg_mrs();
+		break;
+
+	default:
+		break;
+	}
+
+	ehea_update_firmware_handles();
+	ret = NOTIFY_OK;
+
+out_unlock:
+	mutex_unlock(&dlpar_mem_lock);
+	return ret;
+}
+
+static struct notifier_block ehea_mem_nb = {
+	.notifier_call = ehea_mem_notifier,
+};
+
+static void ehea_crash_handler(void)
+{
+	int i;
+
+	if (ehea_fw_handles.arr)
+		for (i = 0; i < ehea_fw_handles.num_entries; i++)
+			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+					     ehea_fw_handles.arr[i].fwh,
+					     FORCE_FREE);
+
+	if (ehea_bcmc_regs.arr)
+		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+					      ehea_bcmc_regs.arr[i].port_id,
+					      ehea_bcmc_regs.arr[i].reg_type,
+					      ehea_bcmc_regs.arr[i].macaddr,
+					      0, H_DEREG_BCMC);
+}
+
+static atomic_t ehea_memory_hooks_registered;
+
+/* Register memory hooks on probe of first adapter */
+static int ehea_register_memory_hooks(void)
+{
+	int ret = 0;
+
+	if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+		return 0;
+
+	ret = ehea_create_busmap();
+	if (ret) {
+		pr_info("ehea_create_busmap failed\n");
+		goto out;
+	}
+
+	ret = register_reboot_notifier(&ehea_reboot_nb);
+	if (ret) {
+		pr_info("register_reboot_notifier failed\n");
+		goto out;
+	}
+
+	ret = register_memory_notifier(&ehea_mem_nb);
+	if (ret) {
+		pr_info("register_memory_notifier failed\n");
+		goto out2;
+	}
+
+	ret = crash_shutdown_register(ehea_crash_handler);
+	if (ret) {
+		pr_info("crash_shutdown_register failed\n");
+		goto out3;
+	}
+
+	return 0;
+
+out3:
+	unregister_memory_notifier(&ehea_mem_nb);
+out2:
+	unregister_reboot_notifier(&ehea_reboot_nb);
+out:
+	return ret;
+}
+
+static void ehea_unregister_memory_hooks(void)
+{
+	if (atomic_read(&ehea_memory_hooks_registered))
+		return;
+
+	unregister_reboot_notifier(&ehea_reboot_nb);
+	if (crash_shutdown_unregister(ehea_crash_handler))
+		pr_info("failed unregistering crash handler\n");
+	unregister_memory_notifier(&ehea_mem_nb);
+}
+
 static int ehea_probe_adapter(struct platform_device *dev)
 {
 	struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@
 	int ret;
 	int i;
 
+	ret = ehea_register_memory_hooks();
+	if (ret)
+		return ret;
+
 	if (!dev || !dev->dev.of_node) {
 		pr_err("Invalid ibmebus device probed\n");
 		return -EINVAL;
@@ -3392,81 +3529,6 @@
 	return 0;
 }
 
-static void ehea_crash_handler(void)
-{
-	int i;
-
-	if (ehea_fw_handles.arr)
-		for (i = 0; i < ehea_fw_handles.num_entries; i++)
-			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
-					     ehea_fw_handles.arr[i].fwh,
-					     FORCE_FREE);
-
-	if (ehea_bcmc_regs.arr)
-		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
-			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
-					      ehea_bcmc_regs.arr[i].port_id,
-					      ehea_bcmc_regs.arr[i].reg_type,
-					      ehea_bcmc_regs.arr[i].macaddr,
-					      0, H_DEREG_BCMC);
-}
-
-static int ehea_mem_notifier(struct notifier_block *nb,
-                             unsigned long action, void *data)
-{
-	int ret = NOTIFY_BAD;
-	struct memory_notify *arg = data;
-
-	mutex_lock(&dlpar_mem_lock);
-
-	switch (action) {
-	case MEM_CANCEL_OFFLINE:
-		pr_info("memory offlining canceled");
-		/* Readd canceled memory block */
-	case MEM_ONLINE:
-		pr_info("memory is going online");
-		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
-		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
-			goto out_unlock;
-		ehea_rereg_mrs();
-		break;
-	case MEM_GOING_OFFLINE:
-		pr_info("memory is going offline");
-		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
-		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
-			goto out_unlock;
-		ehea_rereg_mrs();
-		break;
-	default:
-		break;
-	}
-
-	ehea_update_firmware_handles();
-	ret = NOTIFY_OK;
-
-out_unlock:
-	mutex_unlock(&dlpar_mem_lock);
-	return ret;
-}
-
-static struct notifier_block ehea_mem_nb = {
-	.notifier_call = ehea_mem_notifier,
-};
-
-static int ehea_reboot_notifier(struct notifier_block *nb,
-				unsigned long action, void *unused)
-{
-	if (action == SYS_RESTART) {
-		pr_info("Reboot: freeing all eHEA resources\n");
-		ibmebus_unregister_driver(&ehea_driver);
-	}
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block ehea_reboot_nb = {
-	.notifier_call = ehea_reboot_notifier,
-};
-
 static int check_module_parm(void)
 {
 	int ret = 0;
@@ -3520,26 +3582,10 @@
 	if (ret)
 		goto out;
 
-	ret = ehea_create_busmap();
-	if (ret)
-		goto out;
-
-	ret = register_reboot_notifier(&ehea_reboot_nb);
-	if (ret)
-		pr_info("failed registering reboot notifier\n");
-
-	ret = register_memory_notifier(&ehea_mem_nb);
-	if (ret)
-		pr_info("failed registering memory remove notifier\n");
-
-	ret = crash_shutdown_register(ehea_crash_handler);
-	if (ret)
-		pr_info("failed registering crash handler\n");
-
 	ret = ibmebus_register_driver(&ehea_driver);
 	if (ret) {
 		pr_err("failed registering eHEA device driver on ebus\n");
-		goto out2;
+		goto out;
 	}
 
 	ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@
 	if (ret) {
 		pr_err("failed to register capabilities attribute, ret=%d\n",
 		       ret);
-		goto out3;
+		goto out2;
 	}
 
 	return ret;
 
-out3:
-	ibmebus_unregister_driver(&ehea_driver);
 out2:
-	unregister_memory_notifier(&ehea_mem_nb);
-	unregister_reboot_notifier(&ehea_reboot_nb);
-	crash_shutdown_unregister(ehea_crash_handler);
+	ibmebus_unregister_driver(&ehea_driver);
 out:
 	return ret;
 }
 
 static void __exit ehea_module_exit(void)
 {
-	int ret;
-
 	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
 	ibmebus_unregister_driver(&ehea_driver);
-	unregister_reboot_notifier(&ehea_reboot_nb);
-	ret = crash_shutdown_unregister(ehea_crash_handler);
-	if (ret)
-		pr_info("failed unregistering crash handler\n");
-	unregister_memory_notifier(&ehea_mem_nb);
+	ehea_unregister_memory_hooks();
 	kfree(ehea_fw_handles.arr);
 	kfree(ehea_bcmc_regs.arr);
 	ehea_destroy_busmap();
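
The eHEA rework defers the busmap, reboot, memory and crash hooks from module init to the first adapter probe. A minimal sketch of a register-once guard using a C11 compare-and-swap; this shows only the general idea, not the driver's exact atomic-counter logic:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int hooks_registered;

static int register_hooks_once(void)
{
	int expected = 0;

	if (!atomic_compare_exchange_strong(&hooks_registered, &expected, 1))
		return 0;	/* another probe already did (or is doing) it */

	printf("registering reboot/memory/crash hooks\n");
	return 0;
}

int main(void)
{
	register_hooks_once();	/* first adapter: registers */
	register_hooks_once();	/* later adapters: no-op */
	return 0;
}
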
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 21978cc..cd7675a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1136,6 +1136,8 @@
 	ibmveth_replenish_task(adapter);
 
 	if (frames_processed < budget) {
+		napi_complete(napi);
+
 		/* We think we are done - reenable interrupts,
 		 * then check once more to make sure we are done.
 		 */
@@ -1144,8 +1146,6 @@
 
 		BUG_ON(lpar_rc != H_SUCCESS);
 
-		napi_complete(napi);
-
 		if (ibmveth_rxq_pending_buffer(adapter) &&
 		    napi_reschedule(napi)) {
 			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1327,6 +1327,28 @@
 	return ret;
 }
 
+static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct ibmveth_adapter *adapter = netdev_priv(dev);
+	struct sockaddr *addr = p;
+	u64 mac_address;
+	int rc;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+	rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
+	if (rc) {
+		netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
+		return rc;
+	}
+
+	ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+	return 0;
+}
+
 static const struct net_device_ops ibmveth_netdev_ops = {
 	.ndo_open		= ibmveth_open,
 	.ndo_stop		= ibmveth_close,
@@ -1337,7 +1359,7 @@
 	.ndo_fix_features	= ibmveth_fix_features,
 	.ndo_set_features	= ibmveth_set_features,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_set_mac_address    = ibmveth_set_mac_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ibmveth_poll_controller,
 #endif
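
The ibmveth poll now calls napi_complete() before re-enabling the virtual interrupt, the conventional NAPI ordering: a schedule attempt against a still-scheduled instance is a no-op, so completing first lets a wakeup that races with the re-enable actually take effect. A toy model of that property (flags and names are illustrative, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

static bool scheduled = true;	/* we are currently inside the poll routine */

/* Stand-in for napi_reschedule(): only succeeds once the instance is no
 * longer marked as scheduled. */
static bool toy_reschedule(void)
{
	if (scheduled)
		return false;	/* the wakeup is silently dropped */
	scheduled = true;
	return true;
}

int main(void)
{
	/* Case 1: a wakeup arriving while still marked scheduled is lost. */
	printf("reschedule before complete: %d\n", toy_reschedule());

	/* Case 2: clear the flag first (as napi_complete() does), then the
	 * re-check after re-enabling interrupts can really reschedule. */
	scheduled = false;
	printf("reschedule after complete:  %d\n", toy_reschedule());
	return 0;
}
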
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 11a9ffe..6aea65d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -868,8 +868,9 @@
 	 * The grst delay value is in 100ms units, and we'll wait a
 	 * couple counts longer to be sure we don't just miss the end.
 	 */
-	grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
-			>> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
 	for (cnt = 0; cnt < grst_del + 2; cnt++) {
 		reg = rd32(hw, I40E_GLGEN_RSTAT);
 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2846,7 +2847,7 @@
 
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
-	if (!status)
+	if (!status && filter_index)
 		*filter_index = resp->index;
 
 	return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 183dcb6..a11c70c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@
 	u32 val;
 
 	val = rd32(hw, I40E_PRTDCB_GENC);
-	*delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+	*delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
 		       I40E_PRTDCB_GENC_PFCLDA_SHIFT);
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 61236f9..c17ee77 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -989,8 +989,10 @@
 	if (!cmd_buf)
 		return count;
 	bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
-	if (bytes_not_copied < 0)
+	if (bytes_not_copied < 0) {
+		kfree(cmd_buf);
 		return bytes_not_copied;
+	}
 	if (bytes_not_copied > 0)
 		count -= bytes_not_copied;
 	cmd_buf[count] = '\0';
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbe281b..dadda3c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1512,7 +1512,12 @@
 	vsi->tc_config.numtc = numtc;
 	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
 	/* Number of queues per enabled TC */
-	num_tc_qps = vsi->alloc_queue_pairs/numtc;
+	/* In MFP case we can have a much lower count of MSIx
+	 * vectors available and so we need to lower the used
+	 * q count.
+	 */
+	qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+	num_tc_qps = qcount / numtc;
 	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
 
 	/* Setup queue offset/count for all TCs for given VSI */
@@ -2684,8 +2689,15 @@
 	u16 qoffset, qcount;
 	int i, n;
 
-	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
-		return;
+	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+		/* Reset the TC information */
+		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			rx_ring = vsi->rx_rings[i];
+			tx_ring = vsi->tx_rings[i];
+			rx_ring->dcb_tc = 0;
+			tx_ring->dcb_tc = 0;
+		}
+	}
 
 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
 		if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3830,6 +3842,12 @@
 {
 	int i;
 
+	i40e_stop_misc_vector(pf);
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+		synchronize_irq(pf->msix_entries[0].vector);
+		free_irq(pf->msix_entries[0].vector, pf);
+	}
+
 	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
 	for (i = 0; i < pf->num_alloc_vsi; i++)
 		if (pf->vsi[i])
@@ -5254,8 +5272,14 @@
 
 	/* Wait for the PF's Tx queues to be disabled */
 	ret = i40e_pf_wait_txq_disabled(pf);
-	if (!ret)
+	if (ret) {
+		/* Schedule PF reset to recover */
+		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+		i40e_service_event_schedule(pf);
+	} else {
 		i40e_pf_unquiesce_all_vsi(pf);
+	}
+
 exit:
 	return ret;
 }
@@ -5587,7 +5611,8 @@
 	int i, v;
 
 	/* If we're down or resetting, just bail */
-	if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+	if (test_bit(__I40E_DOWN, &pf->state) ||
+	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
 		return;
 
 	/* for each VSI/netdev
@@ -9533,6 +9558,7 @@
 	set_bit(__I40E_DOWN, &pf->state);
 	del_timer_sync(&pf->service_timer);
 	cancel_work_sync(&pf->service_task);
+	i40e_fdir_teardown(pf);
 
 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
 		i40e_free_vfs(pf);
@@ -9559,12 +9585,6 @@
 	if (pf->vsi[pf->lan_vsi])
 		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
-	i40e_stop_misc_vector(pf);
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-		synchronize_irq(pf->msix_entries[0].vector);
-		free_irq(pf->msix_entries[0].vector, pf);
-	}
-
 	/* shutdown and destroy the HMC */
 	if (pf->hw.hmc.hmc_obj) {
 		ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9718,6 +9738,8 @@
 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
 
+	i40e_clear_interrupt_scheme(pf);
+
 	if (system_state == SYSTEM_POWER_OFF) {
 		pci_wake_from_d3(pdev, pf->wol_en);
 		pci_set_power_state(pdev, PCI_D3hot);
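
The i40e queue-sizing change clamps the queue count to the available MSI-X vectors before splitting it across traffic classes, which matters in MFP setups where the vector budget is small. A tiny standalone model of that arithmetic (the cap value is illustrative):

#include <stdio.h>

#define MAX_QUEUES_PER_TC	64	/* illustrative cap */

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

static int queues_per_tc(int alloc_queue_pairs, int num_lan_msix, int numtc)
{
	/* Clamp to the MSI-X budget first, then split across TCs. */
	int qcount = min_int(alloc_queue_pairs, num_lan_msix);

	return min_int(qcount / numtc, MAX_QUEUES_PER_TC);
}

int main(void)
{
	/* e.g. 64 allocated pairs but only 4 vectors, 2 traffic classes */
	printf("per-TC queues: %d\n", queues_per_tc(64, 4, 2));
	return 0;
}
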
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3e70f2e..5defe0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -679,9 +679,11 @@
 {
 	i40e_status status;
 	enum i40e_nvmupd_cmd upd_cmd;
+	bool retry_attempt = false;
 
 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
 
+retry:
 	switch (upd_cmd) {
 	case I40E_NVMUPD_WRITE_CON:
 		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +727,39 @@
 		*errno = -ESRCH;
 		break;
 	}
+
+	/* In some circumstances, a multi-write transaction takes longer
+	 * than the default 3 minute timeout on the write semaphore.  If
+	 * the write failed with an EBUSY status, this is likely the problem,
+	 * so here we try to reacquire the semaphore then retry the write.
+	 * We only do one retry, then give up.
+	 */
+	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+	    !retry_attempt) {
+		i40e_status old_status = status;
+		u32 old_asq_status = hw->aq.asq_last_status;
+		u32 gtime;
+
+		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+		if (gtime >= hw->nvm.hw_semaphore_timeout) {
+			i40e_debug(hw, I40E_DEBUG_ALL,
+				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+				   gtime, hw->nvm.hw_semaphore_timeout);
+			i40e_release_nvm(hw);
+			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+			if (status) {
+				i40e_debug(hw, I40E_DEBUG_ALL,
+					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+					   hw->aq.asq_last_status);
+				status = old_status;
+				hw->aq.asq_last_status = old_asq_status;
+			} else {
+				retry_attempt = true;
+				goto retry;
+			}
+		}
+	}
+
 	return status;
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d..bbf1b12 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -586,6 +586,20 @@
 }
 
 /**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+	return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
  *
@@ -594,10 +608,16 @@
  **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-			? ring->next_to_use
-			: ring->next_to_use + ring->count);
-	return ntu - ring->next_to_clean;
+	u32 head, tail;
+
+	head = i40e_get_head(ring);
+	tail = readl(ring->tail);
+
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
+
+	return 0;
 }
 
 /**
@@ -606,6 +626,8 @@
  **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+	u32 tx_done = tx_ring->stats.packets;
+	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
 	u32 tx_pending = i40e_get_tx_pending(tx_ring);
 	struct i40e_pf *pf = tx_ring->vsi->back;
 	bool ret = false;
@@ -623,41 +645,25 @@
 	 * run the check_tx_hang logic with a transmit completion
 	 * pending but without time to complete it yet.
 	 */
-	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-	    (tx_pending >= I40E_MIN_DESC_PENDING)) {
+	if ((tx_done_old == tx_done) && tx_pending) {
 		/* make sure it is true for two checks in a row */
 		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
 				       &tx_ring->state);
-	} else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-		   (tx_pending < I40E_MIN_DESC_PENDING) &&
-		   (tx_pending > 0)) {
+	} else if (tx_done_old == tx_done &&
+		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
 		if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
 			dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
 				 tx_pending, tx_ring->queue_index);
 		pf->tx_sluggish_count++;
 	} else {
 		/* update completed stats and disarm the hang check */
-		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+		tx_ring->tx_stats.tx_done_old = tx_done;
 		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
 	}
 
 	return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring:  tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-	return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -2140,6 +2146,67 @@
 }
 
 /**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+			       const u8 hdr_len)
+{
+	struct skb_frag_struct *frag;
+	bool linearize = false;
+	unsigned int size = 0;
+	u16 num_frags;
+	u16 gso_segs;
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	gso_segs = skb_shinfo(skb)->gso_segs;
+
+	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+		u16 j = 1;
+
+		if (num_frags < (I40E_MAX_BUFFER_TXD))
+			goto linearize_chk_done;
+		/* try the simple math, if we have too many frags per segment */
+		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+		    I40E_MAX_BUFFER_TXD) {
+			linearize = true;
+			goto linearize_chk_done;
+		}
+		frag = &skb_shinfo(skb)->frags[0];
+		size = hdr_len;
+		/* we might still have more fragments per segment */
+		do {
+			size += skb_frag_size(frag);
+			frag++; j++;
+			if (j == I40E_MAX_BUFFER_TXD) {
+				if (size < skb_shinfo(skb)->gso_size) {
+					linearize = true;
+					break;
+				}
+				j = 1;
+				size -= skb_shinfo(skb)->gso_size;
+				if (size)
+					j++;
+				size += hdr_len;
+			}
+			num_frags--;
+		} while (num_frags);
+	} else {
+		if (num_frags >= I40E_MAX_BUFFER_TXD)
+			linearize = true;
+	}
+
+linearize_chk_done:
+	return linearize;
+}
+
+/**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
  * @skb:      send buffer
@@ -2396,6 +2463,10 @@
 	if (tsyn)
 		tx_flags |= I40E_TX_FLAGS_TSYN;
 
+	if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+		if (skb_linearize(skb))
+			goto out_drop;
+
 	skb_tx_timestamp(skb);
 
 	/* always enable CRC insertion offload */
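
The reworked i40e_get_tx_pending() derives the outstanding descriptor count from the head write-back value and the tail register, handling ring wrap-around. A small standalone model of that computation with illustrative ring sizes:

#include <stdio.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	printf("no wrap: %u\n", tx_pending(10, 50, 512));	/* 40 */
	printf("wrapped: %u\n", tx_pending(500, 20, 512));	/* 32 */
	printf("idle:    %u\n", tx_pending(7, 7, 512));		/* 0 */
	return 0;
}
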
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 18b0023..dff0bae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -112,6 +112,7 @@
 
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_MAX_BUFFER_TXD	8
 #define I40E_MIN_TX_LEN		17
 #define I40E_MAX_DATA_PER_TXD	8192
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 2900438..7088915 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -126,6 +126,20 @@
 }
 
 /**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+	return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
  *
@@ -134,10 +148,16 @@
  **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-			? ring->next_to_use
-			: ring->next_to_use + ring->count);
-	return ntu - ring->next_to_clean;
+	u32 head, tail;
+
+	head = i40e_get_head(ring);
+	tail = readl(ring->tail);
+
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
+
+	return 0;
 }
 
 /**
@@ -146,6 +166,8 @@
  **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+	u32 tx_done = tx_ring->stats.packets;
+	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
 	u32 tx_pending = i40e_get_tx_pending(tx_ring);
 	bool ret = false;
 
@@ -162,36 +184,20 @@
 	 * run the check_tx_hang logic with a transmit completion
 	 * pending but without time to complete it yet.
 	 */
-	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-	    (tx_pending >= I40E_MIN_DESC_PENDING)) {
+	if ((tx_done_old == tx_done) && tx_pending) {
 		/* make sure it is true for two checks in a row */
 		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
 				       &tx_ring->state);
-	} else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
-		   !(tx_pending < I40E_MIN_DESC_PENDING) ||
-		   !(tx_pending > 0)) {
+	} else if (tx_done_old == tx_done &&
+		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
 		/* update completed stats and disarm the hang check */
-		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+		tx_ring->tx_stats.tx_done_old = tx_done;
 		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
 	}
 
 	return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring:  tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-	return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -1206,17 +1212,16 @@
 	if (err < 0)
 		return err;
 
-	if (protocol == htons(ETH_P_IP)) {
-		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+	if (iph->version == 4) {
 		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
 		iph->tot_len = 0;
 		iph->check = 0;
 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
 						 0, IPPROTO_TCP, 0);
-	} else if (skb_is_gso_v6(skb)) {
-
-		ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-					   : ipv6_hdr(skb);
+	} else if (ipv6h->version == 6) {
 		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
 		ipv6h->payload_len = 0;
 		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1274,13 +1279,9 @@
 					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
 			}
 		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-			if (tx_flags & I40E_TX_FLAGS_TSO) {
-				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+			if (tx_flags & I40E_TX_FLAGS_TSO)
 				ip_hdr(skb)->check = 0;
-			} else {
-				*cd_tunneling |=
-					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-			}
 		}
 
 		/* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@
 				   ((skb_inner_network_offset(skb) -
 					skb_transport_offset(skb)) >> 1) <<
 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+		if (this_ip_hdr->version == 6) {
+			tx_flags &= ~I40E_TX_FLAGS_IPV4;
+			tx_flags |= I40E_TX_FLAGS_IPV6;
+		}
+
 
 	} else {
 		network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@
 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+			       const u8 hdr_len)
+{
+	struct skb_frag_struct *frag;
+	bool linearize = false;
+	unsigned int size = 0;
+	u16 num_frags;
+	u16 gso_segs;
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	gso_segs = skb_shinfo(skb)->gso_segs;
+
+	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+		u16 j = 1;
+
+		if (num_frags < (I40E_MAX_BUFFER_TXD))
+			goto linearize_chk_done;
+		/* try the simple math, if we have too many frags per segment */
+		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+		    I40E_MAX_BUFFER_TXD) {
+			linearize = true;
+			goto linearize_chk_done;
+		}
+		frag = &skb_shinfo(skb)->frags[0];
+		size = hdr_len;
+		/* we might still have more fragments per segment */
+		do {
+			size += skb_frag_size(frag);
+			frag++; j++;
+			if (j == I40E_MAX_BUFFER_TXD) {
+				if (size < skb_shinfo(skb)->gso_size) {
+					linearize = true;
+					break;
+				}
+				j = 1;
+				size -= skb_shinfo(skb)->gso_size;
+				if (size)
+					j++;
+				size += hdr_len;
+			}
+			num_frags--;
+		} while (num_frags);
+	} else {
+		if (num_frags >= I40E_MAX_BUFFER_TXD)
+			linearize = true;
+	}
+
+linearize_chk_done:
+	return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
@@ -1654,6 +1721,10 @@
 	else if (tso)
 		tx_flags |= I40E_TX_FLAGS_TSO;
 
+	if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+		if (skb_linearize(skb))
+			goto out_drop;
+
 	skb_tx_timestamp(skb);
 
 	/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903..c950a03 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@
 
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_MAX_BUFFER_TXD	8
 #define I40E_MIN_TX_LEN		17
 #define I40E_MAX_DATA_PER_TXD	8192
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 2a210c4..ebce5bb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1698,8 +1698,6 @@
 	/* Schedule multicast task to populate multicast list */
 	queue_work(mdev->workqueue, &priv->rx_mode_task);
 
-	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
-
 #ifdef CONFIG_MLX4_EN_VXLAN
 	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 		vxlan_get_rx_port(dev);
@@ -2853,6 +2851,8 @@
 		queue_delayed_work(mdev->workqueue, &priv->service_task,
 				   SERVICE_TASK_DELAY);
 
+	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+
 	return 0;
 
 out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2d8ee66..a61009f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,12 +81,14 @@
 {
 	u32 loopback_ok = 0;
 	int i;
-
+	bool gro_enabled;
 
         priv->loopback_ok = 0;
 	priv->validate_loopback = 1;
+	gro_enabled = priv->dev->features & NETIF_F_GRO;
 
 	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+	priv->dev->features &= ~NETIF_F_GRO;
 
 	/* xmit */
 	if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@
 mlx4_en_test_loopback_exit:
 
 	priv->validate_loopback = 0;
+
+	if (gro_enabled)
+		priv->dev->features |= NETIF_F_GRO;
+
 	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
 	return !loopback_ok;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2a8268e..ebbe244 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -453,7 +453,7 @@
 	unsigned long rx_chksum_none;
 	unsigned long rx_chksum_complete;
 	unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS		9
+#define NUM_PORT_STATS		10
 };
 
 struct mlx4_en_perf_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2bb8553..eda29db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -412,7 +412,6 @@
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
-#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
 		   enum mlx4_update_qp_attr attr,
 		   struct mlx4_update_qp_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 486e3d2..d97ca88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -713,7 +713,7 @@
 	struct mlx4_vport_oper_state *vp_oper;
 	struct mlx4_priv *priv;
 	u32 qp_type;
-	int port;
+	int port, err = 0;
 
 	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
 	priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@
 			} else {
 				struct mlx4_update_qp_params params = {.flags = 0};
 
-				mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+				if (err)
+					goto out;
 			}
 		}
 
@@ -773,7 +775,8 @@
 		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
 		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
 	}
-	return 0;
+out:
+	return err;
 }
 
 static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 44e8d7d..57a6e6c 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1239,11 +1239,9 @@
 	if (mac->phydev)
 		phy_start(mac->phydev);
 
-	init_timer(&mac->tx->clean_timer);
-	mac->tx->clean_timer.function = pasemi_mac_tx_timer;
-	mac->tx->clean_timer.data = (unsigned long)mac->tx;
-	mac->tx->clean_timer.expires = jiffies+HZ;
-	add_timer(&mac->tx->clean_timer);
+	setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
+		    (unsigned long)mac->tx);
+	mod_timer(&mac->tx->clean_timer, jiffies + HZ);
 
 	return 0;
 
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 6e426ae..0a5e204 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -354,7 +354,7 @@
 
 } __attribute__ ((aligned(64)));
 
-/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
 struct rcv_desc {
 	__le16 reference_handle;
 	__le16 reserved;
@@ -499,7 +499,7 @@
 #define NETXEN_IMAGE_START	0x43000	/* compressed image */
 #define NETXEN_SECONDARY_START	0x200000	/* backup images */
 #define NETXEN_PXE_START	0x3E0000	/* PXE boot rom */
-#define NETXEN_USER_START	0x3E8000	/* Firmare info */
+#define NETXEN_USER_START	0x3E8000	/* Firmware info */
 #define NETXEN_FIXED_START	0x3F0000	/* backup of crbinit */
 #define NETXEN_USER_START_OLD	NETXEN_PXE_START /* very old flash */
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fa43176..f221126 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -314,7 +314,7 @@
 #define QLCNIC_BRDCFG_START	0x4000		/* board config */
 #define QLCNIC_BOOTLD_START	0x10000		/* bootld */
 #define QLCNIC_IMAGE_START	0x43000		/* compressed image */
-#define QLCNIC_USER_START	0x3E8000	/* Firmare info */
+#define QLCNIC_USER_START	0x3E8000	/* Firmware info */
 
 #define QLCNIC_FW_VERSION_OFFSET	(QLCNIC_USER_START+0x408)
 #define QLCNIC_FW_SIZE_OFFSET		(QLCNIC_USER_START+0x40c)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ad0020a..c70ab40 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@
 	int rc = -EINVAL;
 
 	if (!rtl_fw_format_ok(tp, rtl_fw)) {
-		netif_err(tp, ifup, dev, "invalid firwmare\n");
+		netif_err(tp, ifup, dev, "invalid firmware\n");
 		goto out;
 	}
 
@@ -5067,8 +5067,6 @@
 	RTL_W8(ChipCmd, CmdReset);
 
 	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
-
-	netdev_reset_queue(tp->dev);
 }
 
 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@
 	u32 status, len;
 	u32 opts[2];
 	int frags;
-	bool stop_queue;
 
 	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@
 
 	txd->opts2 = cpu_to_le32(opts[1]);
 
-	netdev_sent_queue(dev, skb->len);
-
 	skb_tx_timestamp(skb);
 
 	/* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@
 
 	tp->cur_tx += frags + 1;
 
-	stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
+	RTL_W8(TxPoll, NPQ);
 
-	if (!skb->xmit_more || stop_queue ||
-	    netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
-		RTL_W8(TxPoll, NPQ);
+	mmiowb();
 
-		mmiowb();
-	}
-
-	if (stop_queue) {
+	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
 		 */
@@ -7198,7 +7188,6 @@
 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 {
 	unsigned int dirty_tx, tx_left;
-	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	dirty_tx = tp->dirty_tx;
 	smp_rmb();
@@ -7222,8 +7211,10 @@
 		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
 				     tp->TxDescArray + entry);
 		if (status & LastFrag) {
-			pkts_compl++;
-			bytes_compl += tx_skb->skb->len;
+			u64_stats_update_begin(&tp->tx_stats.syncp);
+			tp->tx_stats.packets++;
+			tp->tx_stats.bytes += tx_skb->skb->len;
+			u64_stats_update_end(&tp->tx_stats.syncp);
 			dev_kfree_skb_any(tx_skb->skb);
 			tx_skb->skb = NULL;
 		}
@@ -7232,13 +7223,6 @@
 	}
 
 	if (tp->dirty_tx != dirty_tx) {
-		netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
-
-		u64_stats_update_begin(&tp->tx_stats.syncp);
-		tp->tx_stats.packets += pkts_compl;
-		tp->tx_stats.bytes += bytes_compl;
-		u64_stats_update_end(&tp->tx_stats.syncp);
-
 		tp->dirty_tx = dirty_tx;
 		/* Sync with rtl8169_start_xmit:
 		 * - publish dirty_tx ring index (write barrier)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4da8bd2..736d5d1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -508,7 +508,6 @@
 	.tpauser	= 1,
 	.hw_swap	= 1,
 	.rmiimode	= 1,
-	.shift_rd0	= 1,
 };
 
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@
 	msleep(2); /* max frame time at 10 Mbps < 1250 us */
 	sh_eth_get_stats(ndev);
 	sh_eth_reset(ndev);
+
+	/* Set MAC address again */
+	update_mac_address(ndev);
 }
 
 /* free Tx skb function */
@@ -1407,6 +1409,8 @@
 		txdesc = &mdp->tx_ring[entry];
 		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
 			break;
+		/* TACT bit must be checked before all the following reads */
+		rmb();
 		/* Free the original skb. */
 		if (mdp->tx_skbuff[entry]) {
 			dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@
 	limit = boguscnt;
 	rxdesc = &mdp->rx_ring[entry];
 	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+		/* RACT bit must be checked before all the following reads */
+		rmb();
 		desc_status = edmac_to_cpu(mdp, rxdesc->status);
 		pkt_len = rxdesc->frame_length;
 
@@ -1455,8 +1461,8 @@
 
 		/* In case of almost all GETHER/ETHERs, the Receive Frame State
 		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
-		 * bit 0. However, in case of the R8A7740, R8A779x, and
-		 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+		 * bit 0. However, in case of the R8A7740 and R7S72100
+		 * the RFS bits are from bit 25 to bit 16. So, the
 		 * driver needs right shifting by 16.
 		 */
 		if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@
 			skb_checksum_none_assert(skb);
 			rxdesc->addr = dma_addr;
 		}
+		wmb(); /* RACT bit must be set after all the above writes */
 		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=
 				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@
 	/* If we don't need to check status, don't. -KDU */
 	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
 		/* fix the values for the next receiving if RDE is set */
-		if (intr_status & EESR_RDE) {
+		if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
 			u32 count = (sh_eth_read(ndev, RDFAR) -
 				     sh_eth_read(ndev, RDLAR)) >> 4;
 
@@ -2174,7 +2181,7 @@
 	}
 	spin_unlock_irqrestore(&mdp->lock, flags);
 
-	if (skb_padto(skb, ETH_ZLEN))
+	if (skb_put_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
 
 	entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@
 	}
 	txdesc->buffer_length = skb->len;
 
+	wmb(); /* TACT bit must be set after all the above writes */
 	if (entry >= mdp->num_tx_ring - 1)
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
 	else
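
The rmb()/wmb() calls added to sh_eth above enforce the usual descriptor-ownership discipline: the producer must make the payload visible before it flips the ACT bit, and the consumer must observe the bit before it reads the payload. A rough userspace analogue of that ordering using C11 acquire/release atomics -- a sketch of the idea only, not the driver's DMA barriers:

	#include <stdatomic.h>
	#include <stdio.h>

	struct desc {
		int payload;		/* descriptor contents */
		atomic_int ready;	/* stand-in for the TACT/RACT ownership bit */
	};

	/* Producer: fill the descriptor, then hand it over (release ~ wmb()). */
	static void publish(struct desc *d, int value)
	{
		d->payload = value;
		atomic_store_explicit(&d->ready, 1, memory_order_release);
	}

	/* Consumer: check ownership first (acquire ~ rmb()), then read payload. */
	static int consume(struct desc *d, int *value)
	{
		if (!atomic_load_explicit(&d->ready, memory_order_acquire))
			return 0;	/* not handed over yet */
		*value = d->payload;
		return 1;
	}

	int main(void)
	{
		struct desc d = { .payload = 0, .ready = 0 };
		int v;

		publish(&d, 42);
		if (consume(&d, &v))
			printf("consumed %d\n", v);
		return 0;
	}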
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 34389b6a..9fb6948 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1257,9 +1257,9 @@
 	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
 
 	if (enable)
-		val |= 1 << rocker_port->lport;
+		val |= 1ULL << rocker_port->lport;
 	else
-		val &= ~(1 << rocker_port->lport);
+		val &= ~(1ULL << rocker_port->lport);
 	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
 }
 
@@ -4201,6 +4201,8 @@
 
 	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
 	rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+	if (!rocker->ports)
+		return -ENOMEM;
 	for (i = 0; i < rocker->port_count; i++) {
 		err = rocker_probe_port(rocker, i);
 		if (err)
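
The 1ULL change in rocker above keeps the shift as wide as the 64-bit enable mask it feeds: with a plain int literal, a port index of 32 or more would shift past the width of int, which is undefined behaviour, so the high ports could never be represented. A standalone illustration with hypothetical port numbers:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned int port;

		for (port = 30; port <= 34; port++) {
			/* The literal must be as wide as the mask it builds:
			 * "1 << 33" would shift a 32-bit int past its width,
			 * which is undefined behaviour, so only the 1ULL form
			 * is shown here. */
			uint64_t mask = 1ULL << port;

			printf("port %2u -> mask 0x%016llx\n",
			       port, (unsigned long long)mask);
		}
		return 0;
	}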
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6b33127..3449893 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1070,11 +1070,8 @@
     smc->packets_waiting = 0;
 
     smc_reset(dev);
-    init_timer(&smc->media);
-    smc->media.function = media_check;
-    smc->media.data = (u_long) dev;
-    smc->media.expires = jiffies + HZ;
-    add_timer(&smc->media);
+    setup_timer(&smc->media, media_check, (u_long)dev);
+    mod_timer(&smc->media, jiffies + HZ);
 
     return 0;
 } /* smc_open */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f9..8678e39 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,11 @@
 
 #include "smc91x.h"
 
+#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/assabet.h>
+#include <mach/neponset.h>
+#endif
+
 #ifndef SMC_NOWAIT
 # define SMC_NOWAIT		0
 #endif
@@ -2243,10 +2248,9 @@
 	const struct of_device_id *match = NULL;
 	struct smc_local *lp;
 	struct net_device *ndev;
-	struct resource *res;
+	struct resource *res, *ires;
 	unsigned int __iomem *addr;
 	unsigned long irq_flags = SMC_IRQ_FLAGS;
-	unsigned long irq_resflags;
 	int ret;
 
 	ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2338,25 +2342,23 @@
 		goto out_free_netdev;
 	}
 
-	ndev->irq = platform_get_irq(pdev, 0);
-	if (ndev->irq <= 0) {
+	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!ires) {
 		ret = -ENODEV;
 		goto out_release_io;
 	}
-	/*
-	 * If this platform does not specify any special irqflags, or if
-	 * the resource supplies a trigger, override the irqflags with
-	 * the trigger flags from the resource.
-	 */
-	irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
-	if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
-		irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
+
+	ndev->irq = ires->start;
+
+	if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
+		irq_flags = ires->flags & IRQF_TRIGGER_MASK;
 
 	ret = smc_request_attrib(pdev, ndev);
 	if (ret)
 		goto out_release_io;
-#if defined(CONFIG_SA1100_ASSABET)
-	neponset_ncr_set(NCR_ENET_OSC_EN);
+#if defined(CONFIG_ASSABET_NEPONSET)
+	if (machine_is_assabet() && machine_has_neponset())
+		neponset_ncr_set(NCR_ENET_OSC_EN);
 #endif
 	platform_set_drvdata(pdev, ndev);
 	ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf..3a18501 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
  * Define your architecture specific bus configuration parameters here.
  */
 
-#if defined(CONFIG_ARCH_LUBBOCK) ||\
-    defined(CONFIG_MACH_MAINSTONE) ||\
-    defined(CONFIG_MACH_ZYLONITE) ||\
-    defined(CONFIG_MACH_LITTLETON) ||\
-    defined(CONFIG_MACH_ZYLONITE2) ||\
-    defined(CONFIG_ARCH_VIPER) ||\
-    defined(CONFIG_MACH_STARGATE2) ||\
-    defined(CONFIG_ARCH_VERSATILE)
+#if defined(CONFIG_ARM)
 
 #include <asm/mach-types.h>
 
@@ -74,95 +67,8 @@
 /* We actually can't write halfwords properly if not word aligned */
 static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 {
-	if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
-		unsigned int v = val << 16;
-		v |= readl(ioaddr + (reg & ~2)) & 0xffff;
-		writel(v, ioaddr + (reg & ~2));
-	} else {
-		writew(val, ioaddr + reg);
-	}
-}
-
-#elif defined(CONFIG_SA1100_PLEB)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	0
-#define SMC_IO_SHIFT		0
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_insb(a, r, p, l)	readsb((a) + (r), p, (l))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outsb(a, r, p, l)	writesb((a) + (r), p, (l))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(-1)
-
-#elif defined(CONFIG_SA1100_ASSABET)
-
-#include <mach/neponset.h>
-
-/* We can only do 8-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	0
-#define SMC_CAN_USE_32BIT	0
-#define SMC_NOWAIT		1
-
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT		2
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_insb(a, r, p, l)	readsb((a) + (r), p, (l))
-#define SMC_outsb(a, r, p, l)	writesb((a) + (r), p, (l))
-#define SMC_IRQ_FLAGS		(-1)	/* from resource */
-
-#elif	defined(CONFIG_MACH_LOGICPD_PXA270) ||	\
-	defined(CONFIG_MACH_NOMADIK_8815NHK)
-
-#define SMC_CAN_USE_8BIT	0
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	0
-#define SMC_IO_SHIFT		0
-#define SMC_NOWAIT		1
-
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
-
-#elif	defined(CONFIG_ARCH_INNOKOM) || \
-	defined(CONFIG_ARCH_PXA_IDP) || \
-	defined(CONFIG_ARCH_RAMSES) || \
-	defined(CONFIG_ARCH_PCM027)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_IO_SHIFT		0
-#define SMC_NOWAIT		1
-#define SMC_USE_PXA_DMA		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS		(-1)	/* from resource */
-
-/* We actually can't write halfwords properly if not word aligned */
-static inline void
-SMC_outw(u16 val, void __iomem *ioaddr, int reg)
-{
-	if (reg & 2) {
+	if ((machine_is_mainstone() || machine_is_stargate2() ||
+	     machine_is_pxa_idp()) && reg & 2) {
 		unsigned int v = val << 16;
 		v |= readl(ioaddr + (reg & ~2)) & 0xffff;
 		writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@
 #define RPC_LSA_DEFAULT         RPC_LED_100_10
 #define RPC_LSB_DEFAULT         RPC_LED_TX_RX
 
-#elif defined(CONFIG_ARCH_MSM)
-
-#define SMC_CAN_USE_8BIT	0
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	0
-#define SMC_NOWAIT		1
-
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		IRQF_TRIGGER_HIGH
-
 #elif defined(CONFIG_COLDFIRE)
 
 #define SMC_CAN_USE_8BIT	0
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 55e89b3..a0ea84f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -310,11 +310,11 @@
 		spin_lock_irqsave(&priv->lock, flags);
 		if (!priv->eee_active) {
 			priv->eee_active = 1;
-			init_timer(&priv->eee_ctrl_timer);
-			priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
-			priv->eee_ctrl_timer.data = (unsigned long)priv;
-			priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
-			add_timer(&priv->eee_ctrl_timer);
+			setup_timer(&priv->eee_ctrl_timer,
+				    stmmac_eee_ctrl_timer,
+				    (unsigned long)priv);
+			mod_timer(&priv->eee_ctrl_timer,
+				  STMMAC_LPI_T(eee_timer));
 
 			priv->hw->mac->set_eee_timer(priv->hw,
 						     STMMAC_DEFAULT_LIT_LS,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index fb846eb..f9b42f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -272,6 +272,37 @@
 	struct stmmac_priv *priv = NULL;
 	struct plat_stmmacenet_data *plat_dat = NULL;
 	const char *mac = NULL;
+	int irq, wol_irq, lpi_irq;
+
+	/* Get IRQ information early so we can ask for deferred probe if
+	 * needed, before we go too far with resource allocation.
+	 */
+	irq = platform_get_irq_byname(pdev, "macirq");
+	if (irq < 0) {
+		if (irq != -EPROBE_DEFER) {
+			dev_err(dev,
+				"MAC IRQ configuration information not found\n");
+		}
+		return irq;
+	}
+
+	/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
+	 * The external wake up irq can be passed through the platform code
+	 * named as "eth_wake_irq"
+	 *
+	 * If the wake up interrupt is not passed from the platform,
+	 * the driver will continue to use the mac irq (ndev->irq)
+	 */
+	wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+	if (wol_irq < 0) {
+		if (wol_irq == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		wol_irq = irq;
+	}
+
+	lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+	if (lpi_irq == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	addr = devm_ioremap_resource(dev, res);
@@ -323,39 +354,15 @@
 		return PTR_ERR(priv);
 	}
 
+	/* Copy IRQ values to priv structure which is now available */
+	priv->dev->irq = irq;
+	priv->wol_irq = wol_irq;
+	priv->lpi_irq = lpi_irq;
+
 	/* Get MAC address if available (DT) */
 	if (mac)
 		memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
 
-	/* Get the MAC information */
-	priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
-	if (priv->dev->irq < 0) {
-		if (priv->dev->irq != -EPROBE_DEFER) {
-			netdev_err(priv->dev,
-				   "MAC IRQ configuration information not found\n");
-		}
-		return priv->dev->irq;
-	}
-
-	/*
-	 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
-	 * The external wake up irq can be passed through the platform code
-	 * named as "eth_wake_irq"
-	 *
-	 * In case the wake up interrupt is not passed from the platform
-	 * so the driver will continue to use the mac irq (ndev->irq)
-	 */
-	priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-	if (priv->wol_irq < 0) {
-		if (priv->wol_irq == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		priv->wol_irq = priv->dev->irq;
-	}
-
-	priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
-	if (priv->lpi_irq == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
-
 	platform_set_drvdata(pdev, priv->dev);
 
 	pr_debug("STMMAC platform driver registration completed");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f90..0c5842a 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@
 		*flow_type = IP_USER_FLOW;
 		break;
 	default:
-		return 0;
+		return -EINVAL;
 	}
 
-	return 1;
+	return 0;
 }
 
 static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@
 	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
 		TCAM_V4KEY0_CLASS_CODE_SHIFT;
 	ret = niu_class_to_ethflow(class, &fsp->flow_type);
-
 	if (ret < 0) {
 		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
 			    parent->index);
-		ret = -EINVAL;
 		goto out;
 	}
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d..a1bbaf6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@
 	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
 			   port_mask, ALE_VLAN, slave->port_vlan, 0);
 	cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
-		priv->host_port, ALE_VLAN, slave->port_vlan);
+		priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
 }
 
 static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int cpsw_suspend(struct device *dev)
 {
 	struct platform_device	*pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@
 	}
 	return 0;
 }
+#endif
 
-static const struct dev_pm_ops cpsw_pm_ops = {
-	.suspend	= cpsw_suspend,
-	.resume		= cpsw_resume,
-};
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
 
 static const struct of_device_id cpsw_of_mtable[] = {
 	{ .compatible = "ti,cpsw", },
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 98655b4..c00084d 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -423,6 +423,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int davinci_mdio_suspend(struct device *dev)
 {
 	struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@
 
 	return 0;
 }
+#endif
 
 static const struct dev_pm_ops davinci_mdio_pm_ops = {
-	.suspend_late	= davinci_mdio_suspend,
-	.resume_early	= davinci_mdio_resume,
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
 };
 
 #if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a495931..0e0fbb5 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -498,9 +498,9 @@
 	}
 
 	if (rx_count < budget) {
+		napi_complete(napi);
 		w5100_write(priv, W5100_IMR, IR_S0);
 		mmiowb();
-		napi_complete(napi);
 	}
 
 	return rx_count;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 09322d9..4b31000 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -418,9 +418,9 @@
 	}
 
 	if (rx_count < budget) {
+		napi_complete(napi);
 		w5300_write(priv, W5300_IMR, IR_S0);
 		mmiowb();
-		napi_complete(napi);
 	}
 
 	return rx_count;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index f7e0f0f..9e16a28 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@
 	int i;
 	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
 
-	if (dev->flags & IFF_ALLMULTI) {
+	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
 		for (i = 0; i < ETH_ALEN; i++) {
 			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
 			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index e40fdfc..27ecc5c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -654,11 +654,14 @@
 	} /* else everything is zero */
 }
 
+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
+
 /* Get packet from user space buffer */
 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 				struct iov_iter *from, int noblock)
 {
-	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+	int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
 	struct sk_buff *skb;
 	struct macvlan_dev *vlan;
 	unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@
 			linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
 	}
 
-	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
 				linear, noblock, &err);
 	if (!skb)
 		goto err;
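
MACVTAP_RESERVE above pads the skb headroom so that the 14-byte Ethernet header finishes on the boundary the neighbour code expects. A minimal sketch of that style of reserve calculation, assuming a 16-byte alignment unit; the constant names below are illustrative, not the kernel's HH_DATA_* macros:

	#include <stdio.h>

	#define ALIGN_UNIT	16	/* assumed HH_DATA_MOD-style alignment */
	#define ETH_HDR_LEN	14	/* Ethernet header length */

	/* Bytes to reserve in front of a buffer so that a header of hdr_len
	 * bytes ends exactly on an ALIGN_UNIT boundary. */
	static unsigned int reserve_for_header(unsigned int hdr_len)
	{
		unsigned int rem = hdr_len % ALIGN_UNIT;

		return rem ? ALIGN_UNIT - rem : 0;
	}

	int main(void)
	{
		unsigned int reserve = reserve_for_header(ETH_HDR_LEN);

		/* A 14-byte header in a 16-byte unit -> reserve 2 bytes, so
		 * the data following the header starts 16-byte aligned. */
		printf("reserve %u bytes; header ends at offset %u\n",
		       reserve, reserve + ETH_HDR_LEN);
		return 0;
	}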
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 9e3af54..32efbd4 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -92,6 +92,8 @@
 #define XGBE_PHY_CDR_RATE_PROPERTY	"amd,serdes-cdr-rate"
 #define XGBE_PHY_PQ_SKEW_PROPERTY	"amd,serdes-pq-skew"
 #define XGBE_PHY_TX_AMP_PROPERTY	"amd,serdes-tx-amp"
+#define XGBE_PHY_DFE_CFG_PROPERTY	"amd,serdes-dfe-tap-config"
+#define XGBE_PHY_DFE_ENA_PROPERTY	"amd,serdes-dfe-tap-enable"
 
 #define XGBE_PHY_SPEEDS			3
 #define XGBE_PHY_SPEED_1000		0
@@ -177,10 +179,12 @@
 #define SPEED_10000_BLWC		0
 #define SPEED_10000_CDR			0x7
 #define SPEED_10000_PLL			0x1
-#define SPEED_10000_PQ			0x1e
+#define SPEED_10000_PQ			0x12
 #define SPEED_10000_RATE		0x0
 #define SPEED_10000_TXAMP		0xa
 #define SPEED_10000_WORD		0x7
+#define SPEED_10000_DFE_TAP_CONFIG	0x1
+#define SPEED_10000_DFE_TAP_ENABLE	0x7f
 
 #define SPEED_2500_BLWC			1
 #define SPEED_2500_CDR			0x2
@@ -189,6 +193,8 @@
 #define SPEED_2500_RATE			0x1
 #define SPEED_2500_TXAMP		0xf
 #define SPEED_2500_WORD			0x1
+#define SPEED_2500_DFE_TAP_CONFIG	0x3
+#define SPEED_2500_DFE_TAP_ENABLE	0x0
 
 #define SPEED_1000_BLWC			1
 #define SPEED_1000_CDR			0x2
@@ -197,16 +203,25 @@
 #define SPEED_1000_RATE			0x3
 #define SPEED_1000_TXAMP		0xf
 #define SPEED_1000_WORD			0x1
+#define SPEED_1000_DFE_TAP_CONFIG	0x3
+#define SPEED_1000_DFE_TAP_ENABLE	0x0
 
 /* SerDes RxTx register offsets */
+#define RXTX_REG6			0x0018
 #define RXTX_REG20			0x0050
+#define RXTX_REG22			0x0058
 #define RXTX_REG114			0x01c8
+#define RXTX_REG129			0x0204
 
 /* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX	8
+#define RXTX_REG6_RESETB_RXD_WIDTH	1
 #define RXTX_REG20_BLWC_ENA_INDEX	2
 #define RXTX_REG20_BLWC_ENA_WIDTH	1
 #define RXTX_REG114_PQ_REG_INDEX	9
 #define RXTX_REG114_PQ_REG_WIDTH	7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX	14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH	2
 
 /* Bit setting and getting macros
  *  The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@
 	SPEED_10000_TXAMP,
 };
 
+static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
+	SPEED_1000_DFE_TAP_CONFIG,
+	SPEED_2500_DFE_TAP_CONFIG,
+	SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
+	SPEED_1000_DFE_TAP_ENABLE,
+	SPEED_2500_DFE_TAP_ENABLE,
+	SPEED_10000_DFE_TAP_ENABLE,
+};
+
 enum amd_xgbe_phy_an {
 	AMD_XGBE_AN_READY = 0,
 	AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@
 	u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
 	u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
 	u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+	u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
+	u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
 
 	/* Auto-negotiation state machine support */
 	struct mutex an_mutex;
@@ -481,11 +510,16 @@
 		status = XSIR0_IOREAD(priv, SIR0_STATUS);
 		if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
 		    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
-			return;
+			goto rx_reset;
 	}
 
 	netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
 		   status);
+
+rx_reset:
+	/* Perform Rx reset for the DFE changes */
+	XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
+	XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
 }
 
 static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@
 			   priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
 	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
 			   priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
+	XRXTX_IOWRITE(priv, RXTX_REG22,
+		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
 
 	amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -586,6 +624,10 @@
 			   priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
 	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
 			   priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
+	XRXTX_IOWRITE(priv, RXTX_REG22,
+		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
 
 	amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -638,6 +680,10 @@
 			   priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
 	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
 			   priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
+	XRXTX_IOWRITE(priv, RXTX_REG22,
+		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
 
 	amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -1668,6 +1714,38 @@
 		       sizeof(priv->serdes_tx_amp));
 	}
 
+	if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
+		ret = device_property_read_u32_array(phy_dev,
+						     XGBE_PHY_DFE_CFG_PROPERTY,
+						     priv->serdes_dfe_tap_cfg,
+						     XGBE_PHY_SPEEDS);
+		if (ret) {
+			dev_err(dev, "invalid %s property\n",
+				XGBE_PHY_DFE_CFG_PROPERTY);
+			goto err_sir1;
+		}
+	} else {
+		memcpy(priv->serdes_dfe_tap_cfg,
+		       amd_xgbe_phy_serdes_dfe_tap_cfg,
+		       sizeof(priv->serdes_dfe_tap_cfg));
+	}
+
+	if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
+		ret = device_property_read_u32_array(phy_dev,
+						     XGBE_PHY_DFE_ENA_PROPERTY,
+						     priv->serdes_dfe_tap_ena,
+						     XGBE_PHY_SPEEDS);
+		if (ret) {
+			dev_err(dev, "invalid %s property\n",
+				XGBE_PHY_DFE_ENA_PROPERTY);
+			goto err_sir1;
+		}
+	} else {
+		memcpy(priv->serdes_dfe_tap_ena,
+		       amd_xgbe_phy_serdes_dfe_tap_ena,
+		       sizeof(priv->serdes_dfe_tap_ena));
+	}
+
 	phydev->priv = priv;
 
 	if (!priv->adev || acpi_disabled)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index cdcac6a..52cd8db 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -236,6 +236,25 @@
 }
 
 /**
+ * phy_check_valid - check if there is a valid PHY setting which matches
+ *		     speed, duplex, and feature mask
+ * @speed: speed to match
+ * @duplex: duplex to match
+ * @features: A mask of the valid settings
+ *
+ * Description: Returns true if there is a valid setting, false otherwise.
+ */
+static inline bool phy_check_valid(int speed, int duplex, u32 features)
+{
+	unsigned int idx;
+
+	idx = phy_find_valid(phy_find_setting(speed, duplex), features);
+
+	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
+		(settings[idx].setting & features);
+}
+
+/**
  * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
  * @phydev: the target phy_device struct
  *
@@ -1045,7 +1064,6 @@
 		int eee_lp, eee_cap, eee_adv;
 		u32 lp, cap, adv;
 		int status;
-		unsigned int idx;
 
 		/* Read phy status to properly get the right settings */
 		status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@
 
 		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
 		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
-		idx = phy_find_setting(phydev->speed, phydev->duplex);
-		if (!(lp & adv & settings[idx].setting))
+		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
 			goto eee_exit_err;
 
 		if (clk_stop_enable) {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 0e62274..7d39484 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -43,9 +43,7 @@
 
 static struct team_port *team_port_get_rcu(const struct net_device *dev)
 {
-	struct team_port *port = rcu_dereference(dev->rx_handler_data);
-
-	return team_port_exists(dev) ? port : NULL;
+	return rcu_dereference(dev->rx_handler_data);
 }
 
 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
@@ -1732,11 +1730,11 @@
 	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-	rcu_read_lock();
-	list_for_each_entry_rcu(port, &team->port_list, list)
+	mutex_lock(&team->lock);
+	list_for_each_entry(port, &team->port_list, list)
 		if (team->ops.port_change_dev_addr)
 			team->ops.port_change_dev_addr(team, port);
-	rcu_read_unlock();
+	mutex_unlock(&team->lock);
 	return 0;
 }
 
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3bd9678..7ba8d08 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -161,6 +161,7 @@
 	    * Linksys USB200M
 	    * Netgear FA120
 	    * Sitecom LN-029
+	    * Sitecom LN-028
 	    * Intellinet USB 2.0 Ethernet
 	    * ST Lab USB 2.0 Ethernet
 	    * TrendNet TU2-ET100
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index bf49792..1173a24 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -979,6 +979,10 @@
 	USB_DEVICE (0x0df6, 0x0056),
 	.driver_info =  (unsigned long) &ax88178_info,
 }, {
+	// Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
+	USB_DEVICE (0x0df6, 0x061c),
+	.driver_info =  (unsigned long) &ax88178_info,
+}, {
 	// corega FEther USB2-TX
 	USB_DEVICE (0x07aa, 0x0017),
 	.driver_info =  (unsigned long) &ax8817x_info,
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 3eed708..fe48f4c 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -300,9 +300,18 @@
 	.tx_fixup	= cx82310_tx_fixup,
 };
 
+#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
+	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+		       USB_DEVICE_ID_MATCH_DEV_INFO, \
+	.idVendor = (vend), \
+	.idProduct = (prod), \
+	.bDeviceClass = (cl), \
+	.bDeviceSubClass = (sc), \
+	.bDeviceProtocol = (pr)
+
 static const struct usb_device_id products[] = {
 	{
-		USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
+		USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
 		.driver_info = (unsigned long) &cx82310_info
 	},
 	{ },
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 9cdfb3f..778e915 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1594,7 +1594,7 @@
 		}
 		cprev = cnow;
 	}
-	current->state = TASK_RUNNING;
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&tiocmget->waitq, &wait);
 
 	return ret;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 3d18bb0..1bfe0fc 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -134,6 +134,11 @@
 }, {
 	USB_DEVICE(0x050d, 0x258a),     /* Belkin F5U258/F5U279 (PL-25A1) */
 	.driver_info =  (unsigned long) &prolific_info,
+}, {
+	USB_DEVICE(0x3923, 0x7825),     /* National Instruments USB
+					 * Host-to-Host Cable
+					 */
+	.driver_info =  (unsigned long) &prolific_info,
 },
 
 	{ },		// END
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f1ff366..59b0e97 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1448,8 +1448,10 @@
 {
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
+		napi_hash_del(&vi->rq[i].napi);
 		netif_napi_del(&vi->rq[i].napi);
+	}
 
 	kfree(vi->rq);
 	kfree(vi->sq);
@@ -1948,11 +1950,8 @@
 	cancel_delayed_work_sync(&vi->refill);
 
 	if (netif_running(vi->dev)) {
-		for (i = 0; i < vi->max_queue_pairs; i++) {
+		for (i = 0; i < vi->max_queue_pairs; i++)
 			napi_disable(&vi->rq[i].napi);
-			napi_hash_del(&vi->rq[i].napi);
-			netif_napi_del(&vi->rq[i].napi);
-		}
 	}
 
 	remove_vq_common(vi);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1e0a775..f8528a4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1218,7 +1218,7 @@
 			goto drop;
 
 		flags &= ~VXLAN_HF_RCO;
-		vni &= VXLAN_VID_MASK;
+		vni &= VXLAN_VNI_MASK;
 	}
 
 	/* For backwards compatibility, only allow reserved fields to be
@@ -1239,7 +1239,7 @@
 		flags &= ~VXLAN_GBP_USED_BITS;
 	}
 
-	if (flags || (vni & ~VXLAN_VID_MASK)) {
+	if (flags || vni & ~VXLAN_VNI_MASK) {
 		/* If there are any unprocessed flags remaining treat
 		 * this as a malformed packet. This behavior diverges from
 		 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 83c39e2..88d121d 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -806,21 +806,21 @@
 	spin_lock_irqsave(&cosa->lock, flags);
 	add_wait_queue(&chan->rxwaitq, &wait);
 	while (!chan->rx_status) {
-		current->state = TASK_INTERRUPTIBLE;
+		set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_irqrestore(&cosa->lock, flags);
 		schedule();
 		spin_lock_irqsave(&cosa->lock, flags);
 		if (signal_pending(current) && chan->rx_status == 0) {
 			chan->rx_status = 1;
 			remove_wait_queue(&chan->rxwaitq, &wait);
-			current->state = TASK_RUNNING;
+			__set_current_state(TASK_RUNNING);
 			spin_unlock_irqrestore(&cosa->lock, flags);
 			mutex_unlock(&chan->rlock);
 			return -ERESTARTSYS;
 		}
 	}
 	remove_wait_queue(&chan->rxwaitq, &wait);
-	current->state = TASK_RUNNING;
+	__set_current_state(TASK_RUNNING);
 	kbuf = chan->rxdata;
 	count = chan->rxsize;
 	spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@
 	spin_lock_irqsave(&cosa->lock, flags);
 	add_wait_queue(&chan->txwaitq, &wait);
 	while (!chan->tx_status) {
-		current->state = TASK_INTERRUPTIBLE;
+		set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_irqrestore(&cosa->lock, flags);
 		schedule();
 		spin_lock_irqsave(&cosa->lock, flags);
 		if (signal_pending(current) && chan->tx_status == 0) {
 			chan->tx_status = 1;
 			remove_wait_queue(&chan->txwaitq, &wait);
-			current->state = TASK_RUNNING;
+			__set_current_state(TASK_RUNNING);
 			chan->tx_status = 1;
 			spin_unlock_irqrestore(&cosa->lock, flags);
 			up(&chan->wsem);
@@ -905,7 +905,7 @@
 		}
 	}
 	remove_wait_queue(&chan->txwaitq, &wait);
-	current->state = TASK_RUNNING;
+	__set_current_state(TASK_RUNNING);
 	up(&chan->wsem);
 	spin_unlock_irqrestore(&cosa->lock, flags);
 	kfree(kbuf);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ccbdb05..75345c1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5370,6 +5370,7 @@
 	case 0x432a: /* BCM4321 */
 	case 0x432d: /* BCM4322 */
 	case 0x4352: /* BCM43222 */
+	case 0x435a: /* BCM43228 */
 	case 0x4333: /* BCM4331 */
 	case 0x43a2: /* BCM4360 */
 	case 0x43b3: /* BCM4352 */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
index 50cdf70..8eff275 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
@@ -39,13 +39,22 @@
 	void *dcmd_buf = NULL, *wr_pointer;
 	u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
 
-	brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set,
-		  cmdhdr->len);
+	if (len < sizeof(*cmdhdr)) {
+		brcmf_err("vendor command too short: %d\n", len);
+		return -EINVAL;
+	}
 
 	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
 	ifp = vif->ifp;
 
-	len -= sizeof(struct brcmf_vndr_dcmd_hdr);
+	brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd);
+
+	if (cmdhdr->offset > len) {
+		brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len);
+		return -EINVAL;
+	}
+
+	len -= cmdhdr->offset;
 	ret_len = cmdhdr->len;
 	if (ret_len > 0 || len > 0) {
 		if (len > BRCMF_DCMD_MAXLEN) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index c3817fa..06f6cc0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -95,7 +95,8 @@
 	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,	\
 	.base_params = &iwl1000_base_params,			\
 	.eeprom_params = &iwl1000_eeprom_params,		\
-	.led_mode = IWL_LED_BLINK
+	.led_mode = IWL_LED_BLINK,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl1000_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -121,7 +122,8 @@
 	.base_params = &iwl1000_base_params,			\
 	.eeprom_params = &iwl1000_eeprom_params,		\
 	.led_mode = IWL_LED_RF_STATE,				\
-	.rx_with_siso_diversity = true
+	.rx_with_siso_diversity = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl100_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 21e5d08..890b95f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -123,7 +123,9 @@
 	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,		\
 	.base_params = &iwl2000_base_params,			\
 	.eeprom_params = &iwl20x0_eeprom_params,		\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+
 
 const struct iwl_cfg iwl2000_2bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -149,7 +151,8 @@
 	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
 	.base_params = &iwl2030_base_params,			\
 	.eeprom_params = &iwl20x0_eeprom_params,		\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl2030_2bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -170,7 +173,8 @@
 	.base_params = &iwl2000_base_params,			\
 	.eeprom_params = &iwl20x0_eeprom_params,		\
 	.led_mode = IWL_LED_RF_STATE,				\
-	.rx_with_siso_diversity = true
+	.rx_with_siso_diversity = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl105_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -197,7 +201,8 @@
 	.base_params = &iwl2030_base_params,			\
 	.eeprom_params = &iwl20x0_eeprom_params,		\
 	.led_mode = IWL_LED_RF_STATE,				\
-	.rx_with_siso_diversity = true
+	.rx_with_siso_diversity = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl135_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 332bbed..724194e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -93,7 +93,8 @@
 	.nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION,	\
 	.base_params = &iwl5000_base_params,			\
 	.eeprom_params = &iwl5000_eeprom_params,		\
-	.led_mode = IWL_LED_BLINK
+	.led_mode = IWL_LED_BLINK,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl5300_agn_cfg = {
 	.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -158,7 +159,8 @@
 	.base_params = &iwl5000_base_params,			\
 	.eeprom_params = &iwl5000_eeprom_params,		\
 	.led_mode = IWL_LED_BLINK,				\
-	.internal_wimax_coex = true
+	.internal_wimax_coex = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl5150_agn_cfg = {
 	.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8f2c3c8..21b2630 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -145,7 +145,8 @@
 	.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_g2_base_params,			\
 	.eeprom_params = &iwl6000_eeprom_params,		\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6005_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -199,7 +200,8 @@
 	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_g2_base_params,			\
 	.eeprom_params = &iwl6000_eeprom_params,		\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6030_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -235,7 +237,8 @@
 	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_g2_base_params,			\
 	.eeprom_params = &iwl6000_eeprom_params,		\
-	.led_mode = IWL_LED_RF_STATE
+	.led_mode = IWL_LED_RF_STATE,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6035_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -290,7 +293,8 @@
 	.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,	\
 	.base_params = &iwl6000_base_params,			\
 	.eeprom_params = &iwl6000_eeprom_params,		\
-	.led_mode = IWL_LED_BLINK
+	.led_mode = IWL_LED_BLINK,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6000i_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -322,7 +326,8 @@
 	.base_params = &iwl6050_base_params,			\
 	.eeprom_params = &iwl6000_eeprom_params,		\
 	.led_mode = IWL_LED_BLINK,				\
-	.internal_wimax_coex = true
+	.internal_wimax_coex = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6050_2agn_cfg = {
 	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -347,7 +352,8 @@
 	.base_params = &iwl6050_base_params,			\
 	.eeprom_params = &iwl6000_eeprom_params,		\
 	.led_mode = IWL_LED_BLINK,				\
-	.internal_wimax_coex = true
+	.internal_wimax_coex = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
 
 const struct iwl_cfg iwl6150_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 1ec4d55..7810c41 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -793,7 +793,8 @@
 	if (!vif->bss_conf.assoc)
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
-	if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
+	if (mvmvif->phy_ctxt &&
+	    IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
 			       mvmvif->phy_ctxt->id))
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index d530ef3..542ee74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -832,7 +832,8 @@
 	if (!vif->bss_conf.assoc)
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
-	if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
+	if (mvmvif->phy_ctxt &&
+	    data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
 	IWL_DEBUG_COEX(data->mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1ff7ec0..09654e7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -405,7 +405,10 @@
 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
 			&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
 
-		if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER)
+		if ((mvm->fw->ucode_capa.capa[0] &
+		     IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+		    (mvm->fw->ucode_capa.api[0] &
+		     IWL_UCODE_TLV_API_LQ_SS_PARAMS))
 			hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
 				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
 	}
@@ -2215,7 +2218,19 @@
 
 	mutex_lock(&mvm->mutex);
 
-	iwl_mvm_cancel_scan(mvm);
+	/* Due to a race condition, it's possible that mac80211 asks
+	 * us to stop a hw_scan when it's already stopped.  This can
+	 * happen, for instance, if we stopped the scan ourselves,
+	 * called ieee80211_scan_completed() and the userspace called
+	 * cancel scan before ieee80211_scan_work() could run.
+	 * To handle that, simply return if the scan is not running.
+	 */
+	/* FIXME: for now, we ignore this race for UMAC scans, since
+	 * they don't set the scan_status.
+	 */
+	if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
+	    (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+		iwl_mvm_cancel_scan(mvm);
 
 	mutex_unlock(&mvm->mutex);
 }
@@ -2559,12 +2574,29 @@
 	int ret;
 
 	mutex_lock(&mvm->mutex);
+
+	/* Due to a race condition, it's possible that mac80211 asks
+	 * us to stop a sched_scan when it's already stopped.  This
+	 * can happen, for instance, if we stopped the scan ourselves,
+	 * called ieee80211_sched_scan_stopped() and the userspace called
+	 * stop sched scan before ieee80211_sched_scan_stopped_work()
+	 * could run.  To handle this, simply return if the scan is
+	 * not running.
+	 */
+	/* FIXME: for now, we ignore this race for UMAC scans, since
+	 * they don't set the scan_status.
+	 */
+	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
+	    !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		mutex_unlock(&mvm->mutex);
+		return 0;
+	}
+
 	ret = iwl_mvm_scan_offload_stop(mvm, false);
 	mutex_unlock(&mvm->mutex);
 	iwl_mvm_wait_for_async_handlers(mvm);
 
 	return ret;
-
 }
 
 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 194bd1f..efa9688 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -134,9 +134,12 @@
 #define MAX_NEXT_COLUMNS 7
 #define MAX_COLUMN_CHECKS 3
 
+struct rs_tx_column;
+
 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
 				     struct ieee80211_sta *sta,
-				     struct iwl_scale_tbl_info *tbl);
+				     struct iwl_scale_tbl_info *tbl,
+				     const struct rs_tx_column *next_col);
 
 struct rs_tx_column {
 	enum rs_column_mode mode;
@@ -147,13 +150,15 @@
 };
 
 static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			 struct iwl_scale_tbl_info *tbl)
+			 struct iwl_scale_tbl_info *tbl,
+			 const struct rs_tx_column *next_col)
 {
-	return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
+	return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
 }
 
 static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  struct iwl_scale_tbl_info *tbl)
+			  struct iwl_scale_tbl_info *tbl,
+			  const struct rs_tx_column *next_col)
 {
 	if (!sta->ht_cap.ht_supported)
 		return false;
@@ -171,7 +176,8 @@
 }
 
 static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  struct iwl_scale_tbl_info *tbl)
+			  struct iwl_scale_tbl_info *tbl,
+			  const struct rs_tx_column *next_col)
 {
 	if (!sta->ht_cap.ht_supported)
 		return false;
@@ -180,7 +186,8 @@
 }
 
 static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			 struct iwl_scale_tbl_info *tbl)
+			 struct iwl_scale_tbl_info *tbl,
+			 const struct rs_tx_column *next_col)
 {
 	struct rs_rate *rate = &tbl->rate;
 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
@@ -1590,7 +1597,7 @@
 
 		for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
 			allow_func = next_col->checks[j];
-			if (allow_func && !allow_func(mvm, sta, tbl))
+			if (allow_func && !allow_func(mvm, sta, tbl, next_col))
 				break;
 		}
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 7e9aa3c..c47c8051 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1128,8 +1128,10 @@
 	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
 		return 0;
 
-	if (iwl_mvm_is_radio_killed(mvm))
+	if (iwl_mvm_is_radio_killed(mvm)) {
+		ret = 0;
 		goto out;
+	}
 
 	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
 	    (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
@@ -1148,16 +1150,14 @@
 		IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
 			       sched ? "offloaded " : "", ret);
 		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
-		return ret;
+		goto out;
 	}
 
 	IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
 		       sched ? "offloaded " : "");
 
 	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-	if (ret)
-		return ret;
-
+out:
 	/*
 	 * Clear the scan status so the next scan requests will succeed. This
 	 * also ensures the Rx handler doesn't do anything, as the scan was
@@ -1167,7 +1167,6 @@
 	if (mvm->scan_status == IWL_MVM_SCAN_OS)
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 
-out:
 	mvm->scan_status = IWL_MVM_SCAN_NONE;
 
 	if (notify) {
@@ -1177,7 +1176,7 @@
 			ieee80211_scan_completed(mvm->hw, true);
 	}
 
-	return 0;
+	return ret;
 }
 
 static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 54fafbf..f8d6f30 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -750,8 +750,7 @@
 	 * request
 	 */
 	list_for_each_entry(te_data, &mvm->time_event_list, list) {
-		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE &&
-		    te_data->running) {
+		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
 			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
 			is_p2p = true;
 			goto remove_te;
@@ -766,10 +765,8 @@
 	 * request
 	 */
 	list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
-		if (te_data->running) {
-			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-			goto remove_te;
-		}
+		mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+		goto remove_te;
 	}
 
 remove_te:
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4a4c658..8908be6 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -946,7 +946,8 @@
 		goto nla_put_failure;
 
 	genlmsg_end(skb, msg_head);
-	genlmsg_unicast(&init_net, skb, dst_portid);
+	if (genlmsg_unicast(&init_net, skb, dst_portid))
+		goto err_free_txskb;
 
 	/* Enqueue the packet */
 	skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@
 	return;
 
 nla_put_failure:
+	nlmsg_free(skb);
+err_free_txskb:
 	printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
 	ieee80211_free_txskb(hw, my_skb);
 	data->tx_failed++;
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 1d46774..074f716 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1386,8 +1386,11 @@
 		}
 
 		return true;
-	} else if (0x86DD == ether_type) {
-		return true;
+	} else if (ETH_P_IPV6 == ether_type) {
+		/* TODO: Handle any IPv6 cases that need special handling.
+		 * For now, always return false
+		 */
+		goto end;
 	}
 
 end:
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f38227a..3aa8648 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -340,12 +340,11 @@
 	unsigned int num_queues = vif->num_queues;
 	int i;
 	unsigned int queue_index;
-	struct xenvif_stats *vif_stats;
 
 	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
 		unsigned long accum = 0;
 		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
-			vif_stats = &vif->queues[queue_index].stats;
+			void *vif_stats = &vif->queues[queue_index].stats;
 			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
 		}
 		data[i] = accum;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2..997cf09 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,7 @@
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
 			     s8       st);
+static void push_tx_responses(struct xenvif_queue *queue);
 
 static inline int tx_work_todo(struct xenvif_queue *queue);
 
@@ -657,6 +658,7 @@
 	do {
 		spin_lock_irqsave(&queue->response_lock, flags);
 		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+		push_tx_responses(queue);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
 		if (cons == end)
 			break;
@@ -1343,7 +1345,7 @@
 {
 	unsigned int offset = skb_headlen(skb);
 	skb_frag_t frags[MAX_SKB_FRAGS];
-	int i;
+	int i, f;
 	struct ubuf_info *uarg;
 	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
 
@@ -1383,23 +1385,25 @@
 		frags[i].page_offset = 0;
 		skb_frag_size_set(&frags[i], len);
 	}
-	/* swap out with old one */
-	memcpy(skb_shinfo(skb)->frags,
-	       frags,
-	       i * sizeof(skb_frag_t));
-	skb_shinfo(skb)->nr_frags = i;
-	skb->truesize += i * PAGE_SIZE;
 
-	/* remove traces of mapped pages and frag_list */
+	/* Copied all the bits from the frag list -- free it. */
 	skb_frag_list_init(skb);
+	xenvif_skb_zerocopy_prepare(queue, nskb);
+	kfree_skb(nskb);
+
+	/* Release all the original (foreign) frags. */
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		skb_frag_unref(skb, f);
 	uarg = skb_shinfo(skb)->destructor_arg;
 	/* increase inflight counter to offset decrement in callback */
 	atomic_inc(&queue->inflight_packets);
 	uarg->callback(uarg, true);
 	skb_shinfo(skb)->destructor_arg = NULL;
 
-	xenvif_skb_zerocopy_prepare(queue, nskb);
-	kfree_skb(nskb);
+	/* Fill the skb with the new (local) frags. */
+	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
+	skb_shinfo(skb)->nr_frags = i;
+	skb->truesize += i * PAGE_SIZE;
 
 	return 0;
 }
@@ -1652,13 +1656,20 @@
 	unsigned long flags;
 
 	pending_tx_info = &queue->pending_tx_info[pending_idx];
+
 	spin_lock_irqsave(&queue->response_lock, flags);
+
 	make_tx_response(queue, &pending_tx_info->req, status);
-	index = pending_index(queue->pending_prod);
+
+	/* Release the pending index before pushing the Tx response so
+	 * it's available before a new Tx request is pushed by the
+	 * frontend.
+	 */
+	index = pending_index(queue->pending_prod++);
 	queue->pending_ring[index] = pending_idx;
-	/* TX shouldn't use the index before we give it back here */
-	mb();
-	queue->pending_prod++;
+
+	push_tx_responses(queue);
+
 	spin_unlock_irqrestore(&queue->response_lock, flags);
 }
 
@@ -1669,7 +1680,6 @@
 {
 	RING_IDX i = queue->tx.rsp_prod_pvt;
 	struct xen_netif_tx_response *resp;
-	int notify;
 
 	resp = RING_GET_RESPONSE(&queue->tx, i);
 	resp->id     = txp->id;
@@ -1679,6 +1689,12 @@
 		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
 	queue->tx.rsp_prod_pvt = ++i;
+}
+
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+	int notify;
+
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
 		notify_remote_via_irq(queue->tx_irq);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 38d1c51..7bcaeec 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -84,8 +84,7 @@
 	bool
 
 config OF_OVERLAY
-	bool
-	depends on OF
+	bool "Device Tree overlays"
 	select OF_DYNAMIC
 	select OF_RESOLVE
 
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 0a8aeb8..adb8764 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -714,16 +714,17 @@
 						const char *path)
 {
 	struct device_node *child;
-	int len = strchrnul(path, '/') - path;
-	int term;
+	int len;
+	const char *end;
 
+	end = strchr(path, ':');
+	if (!end)
+		end = strchrnul(path, '/');
+
+	len = end - path;
 	if (!len)
 		return NULL;
 
-	term = strchrnul(path, ':') - path;
-	if (term < len)
-		len = term;
-
 	__for_each_child_of_node(parent, child) {
 		const char *name = strrchr(child->full_name, '/');
 		if (WARN(!name, "malformed device_node %s\n", child->full_name))
@@ -768,8 +769,12 @@
 
 	/* The path could begin with an alias */
 	if (*path != '/') {
-		char *p = strchrnul(path, '/');
-		int len = separator ? separator - path : p - path;
+		int len;
+		const char *p = separator;
+
+		if (!p)
+			p = strchrnul(path, '/');
+		len = p - path;
 
 		/* of_aliases must not be NULL */
 		if (!of_aliases)
@@ -794,6 +799,8 @@
 		path++; /* Increment past '/' delimiter */
 		np = __of_find_node_by_path(np, path);
 		path = strchrnul(path, '/');
+		if (separator && separator < path)
+			break;
 	}
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
@@ -1886,8 +1893,10 @@
 			name = of_get_property(of_chosen, "linux,stdout-path", NULL);
 		if (IS_ENABLED(CONFIG_PPC) && !name)
 			name = of_get_property(of_aliases, "stdout", NULL);
-		if (name)
+		if (name) {
 			of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
+			add_preferred_console("stdout-path", 0, NULL);
+		}
 	}
 
 	if (!of_aliases)
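
The base.c hunks above change how a node path with a trailing ":options" string is split: a component now ends at the first ':' if one is present, otherwise at the first '/'. A standalone sketch of that rule follows; it is not part of the patch, component_len() is a hypothetical helper, and a portable fallback stands in for the kernel's strchrnul() (which returns the end of the string when the character is absent).

/*
 * Standalone sketch, not part of the patch: the component-length rule
 * used by the reworked __of_find_node_by_path().
 */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

static size_t component_len(const char *path)
{
	const char *end = strchr(path, ':');	/* options start here, if any */

	if (!end) {
		end = strchr(path, '/');	/* otherwise stop at the next component */
		if (!end)
			end = path + strlen(path);
	}

	return (size_t)(end - path);
}

int main(void)
{
	/* everything after the ':' is an options string, not a node name */
	printf("%zu\n", component_len("testcase-data:test/option"));	/* 13 */
	printf("%zu\n", component_len("test/option"));			/* 4 */
	return 0;
}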
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 352b4f2..dee9270 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -19,6 +19,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/idr.h>
 
 #include "of_private.h"
 
@@ -85,7 +86,7 @@
 		struct device_node *target, struct device_node *child)
 {
 	const char *cname;
-	struct device_node *tchild, *grandchild;
+	struct device_node *tchild;
 	int ret = 0;
 
 	cname = kbasename(child->full_name);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 0cf9a23..aba8946 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -92,6 +92,11 @@
 		 "option path test failed\n");
 	of_node_put(np);
 
+	np = of_find_node_opts_by_path("/testcase-data:test/option", &options);
+	selftest(np && !strcmp("test/option", options),
+		 "option path test, subcase #1 failed\n");
+	of_node_put(np);
+
 	np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
 	selftest(np, "NULL option path test failed\n");
 	of_node_put(np);
@@ -102,6 +107,12 @@
 		 "option alias path test failed\n");
 	of_node_put(np);
 
+	np = of_find_node_opts_by_path("testcase-alias:test/alias/option",
+				       &options);
+	selftest(np && !strcmp("test/alias/option", options),
+		 "option alias path test, subcase #1 failed\n");
+	of_node_put(np);
+
 	np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
 	selftest(np, "NULL option alias path test failed\n");
 	of_node_put(np);
@@ -378,9 +389,9 @@
 	rc = of_property_match_string(np, "phandle-list-names", "first");
 	selftest(rc == 0, "first expected:0 got:%i\n", rc);
 	rc = of_property_match_string(np, "phandle-list-names", "second");
-	selftest(rc == 1, "second expected:0 got:%i\n", rc);
+	selftest(rc == 1, "second expected:1 got:%i\n", rc);
 	rc = of_property_match_string(np, "phandle-list-names", "third");
-	selftest(rc == 2, "third expected:0 got:%i\n", rc);
+	selftest(rc == 2, "third expected:2 got:%i\n", rc);
 	rc = of_property_match_string(np, "phandle-list-names", "fourth");
 	selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
 	rc = of_property_match_string(np, "missing-property", "blah");
@@ -478,7 +489,6 @@
 	struct device_node *n1, *n2, *n21, *nremove, *parent, *np;
 	struct of_changeset chgset;
 
-	of_changeset_init(&chgset);
 	n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1");
 	selftest(n1, "testcase setup failure\n");
 	n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2");
@@ -979,7 +989,7 @@
 	return pdev != NULL;
 }
 
-#if IS_ENABLED(CONFIG_I2C)
+#if IS_BUILTIN(CONFIG_I2C)
 
 /* get the i2c client device instantiated at the path */
 static struct i2c_client *of_path_to_i2c_client(const char *path)
@@ -1445,7 +1455,7 @@
 		return;
 }
 
-#if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
+#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
 
 struct selftest_i2c_bus_data {
 	struct platform_device	*pdev;
@@ -1584,7 +1594,7 @@
 	.id_table = selftest_i2c_dev_id,
 };
 
-#if IS_ENABLED(CONFIG_I2C_MUX)
+#if IS_BUILTIN(CONFIG_I2C_MUX)
 
 struct selftest_i2c_mux_data {
 	int nchans;
@@ -1695,7 +1705,7 @@
 			"could not register selftest i2c bus driver\n"))
 		return ret;
 
-#if IS_ENABLED(CONFIG_I2C_MUX)
+#if IS_BUILTIN(CONFIG_I2C_MUX)
 	ret = i2c_add_driver(&selftest_i2c_mux_driver);
 	if (selftest(ret == 0,
 			"could not register selftest i2c mux driver\n"))
@@ -1707,7 +1717,7 @@
 
 static void of_selftest_overlay_i2c_cleanup(void)
 {
-#if IS_ENABLED(CONFIG_I2C_MUX)
+#if IS_BUILTIN(CONFIG_I2C_MUX)
 	i2c_del_driver(&selftest_i2c_mux_driver);
 #endif
 	platform_driver_unregister(&selftest_i2c_bus_driver);
@@ -1814,7 +1824,7 @@
 	of_selftest_overlay_10();
 	of_selftest_overlay_11();
 
-#if IS_ENABLED(CONFIG_I2C)
+#if IS_BUILTIN(CONFIG_I2C)
 	if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n"))
 		goto out;
 
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 1ec694a..464bf49 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -80,7 +80,7 @@
 	if (err)
 		return err;
 
-	resource_list_for_each_entry(win, res, list) {
+	resource_list_for_each_entry(win, res) {
 		struct resource *parent, *res = win->res;
 
 		switch (resource_type(res)) {
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index aab5547..ee082c0 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -127,7 +127,7 @@
 	return false;
 }
 
-static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
 			      int offset)
 {
 	struct xgene_pcie_port *port = bus->sysdata;
@@ -137,7 +137,7 @@
 		return NULL;
 
 	xgene_pcie_set_rtdid_reg(bus, devfn);
-	return xgene_pcie_get_cfg_base(bus);
+	return xgene_pcie_get_cfg_base(bus) + offset;
 }
 
 static struct pci_ops xgene_pcie_ops = {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index aa012fb..312f23a 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -521,7 +521,8 @@
 	struct pci_dev *pdev = to_pci_dev(dev);
 	char *driver_override, *old = pdev->driver_override, *cp;
 
-	if (count > PATH_MAX)
+	/* We need to keep extra room for a newline */
+	if (count >= (PAGE_SIZE - 1))
 		return -EINVAL;
 
 	driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -549,7 +550,7 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 
-	return sprintf(buf, "%s\n", pdev->driver_override);
+	return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
 }
 static DEVICE_ATTR_RW(driver_override);
 
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 5afe03e..2062c22 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -66,6 +66,10 @@
 #define BYT_DIR_MASK		(BIT(1) | BIT(2))
 #define BYT_TRIG_MASK		(BIT(26) | BIT(25) | BIT(24))
 
+#define BYT_CONF0_RESTORE_MASK	(BYT_DIRECT_IRQ_EN | BYT_TRIG_MASK | \
+				 BYT_PIN_MUX)
+#define BYT_VAL_RESTORE_MASK	(BYT_DIR_MASK | BYT_LEVEL)
+
 #define BYT_NGPIO_SCORE		102
 #define BYT_NGPIO_NCORE		28
 #define BYT_NGPIO_SUS		44
@@ -134,12 +138,18 @@
 	},
 };
 
+struct byt_gpio_pin_context {
+	u32 conf0;
+	u32 val;
+};
+
 struct byt_gpio {
 	struct gpio_chip		chip;
 	struct platform_device		*pdev;
 	spinlock_t			lock;
 	void __iomem			*reg_base;
 	struct pinctrl_gpio_range	*range;
+	struct byt_gpio_pin_context	*saved_context;
 };
 
 #define to_byt_gpio(c)	container_of(c, struct byt_gpio, chip)
@@ -158,40 +168,62 @@
 	return vg->reg_base + reg_offset + reg;
 }
 
-static bool is_special_pin(struct byt_gpio *vg, unsigned offset)
+static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset)
+{
+	void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+	unsigned long flags;
+	u32 value;
+
+	spin_lock_irqsave(&vg->lock, flags);
+	value = readl(reg);
+	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+	writel(value, reg);
+	spin_unlock_irqrestore(&vg->lock, flags);
+}
+
+static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
 {
 	/* SCORE pin 92-93 */
 	if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
 		offset >= 92 && offset <= 93)
-		return true;
+		return 1;
 
 	/* SUS pin 11-21 */
 	if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
 		offset >= 11 && offset <= 21)
-		return true;
+		return 1;
 
-	return false;
+	return 0;
 }
 
 static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
 {
 	struct byt_gpio *vg = to_byt_gpio(chip);
 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
-	u32 value;
-	bool special;
+	u32 value, gpio_mux;
 
 	/*
 	 * In most cases, func pin mux 000 means GPIO function.
 	 * But, some pins may have func pin mux 001 represents
-	 * GPIO function. Only allow user to export pin with
-	 * func pin mux preset as GPIO function by BIOS/FW.
+	 * GPIO function.
+	 *
+	 * Because there are devices out there where some pins were not
+	 * configured correctly, we allow changing the mux value from
+	 * the request (but print a warning about it).
 	 */
 	value = readl(reg) & BYT_PIN_MUX;
-	special = is_special_pin(vg, offset);
-	if ((special && value != 1) || (!special && value)) {
-		dev_err(&vg->pdev->dev,
-			"pin %u cannot be used as GPIO.\n", offset);
-		return -EINVAL;
+	gpio_mux = byt_get_gpio_mux(vg, offset);
+	if (WARN_ON(gpio_mux != value)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&vg->lock, flags);
+		value = readl(reg) & ~BYT_PIN_MUX;
+		value |= gpio_mux;
+		writel(value, reg);
+		spin_unlock_irqrestore(&vg->lock, flags);
+
+		dev_warn(&vg->pdev->dev,
+			 "pin %u forcibly re-configured as GPIO\n", offset);
 	}
 
 	pm_runtime_get(&vg->pdev->dev);
@@ -202,14 +234,8 @@
 static void byt_gpio_free(struct gpio_chip *chip, unsigned offset)
 {
 	struct byt_gpio *vg = to_byt_gpio(chip);
-	void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
-	u32 value;
 
-	/* clear interrupt triggering */
-	value = readl(reg);
-	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
-	writel(value, reg);
-
+	byt_gpio_clear_triggering(vg, offset);
 	pm_runtime_put(&vg->pdev->dev);
 }
 
@@ -236,23 +262,13 @@
 	value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
 		   BYT_TRIG_LVL);
 
-	switch (type) {
-	case IRQ_TYPE_LEVEL_HIGH:
-		value |= BYT_TRIG_LVL;
-	case IRQ_TYPE_EDGE_RISING:
-		value |= BYT_TRIG_POS;
-		break;
-	case IRQ_TYPE_LEVEL_LOW:
-		value |= BYT_TRIG_LVL;
-	case IRQ_TYPE_EDGE_FALLING:
-		value |= BYT_TRIG_NEG;
-		break;
-	case IRQ_TYPE_EDGE_BOTH:
-		value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
-		break;
-	}
 	writel(value, reg);
 
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
+	else if (type & IRQ_TYPE_LEVEL_MASK)
+		__irq_set_handler_locked(d->irq, handle_level_irq);
+
 	spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
@@ -410,58 +426,80 @@
 	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
-	u32 base, pin, mask;
+	u32 base, pin;
 	void __iomem *reg;
-	u32 pending;
+	unsigned long pending;
 	unsigned virq;
-	int looplimit = 0;
 
 	/* check from GPIO controller which pin triggered the interrupt */
 	for (base = 0; base < vg->chip.ngpio; base += 32) {
-
 		reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);
-
-		while ((pending = readl(reg))) {
-			pin = __ffs(pending);
-			mask = BIT(pin);
-			/* Clear before handling so we can't lose an edge */
-			writel(mask, reg);
-
+		pending = readl(reg);
+		for_each_set_bit(pin, &pending, 32) {
 			virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
 			generic_handle_irq(virq);
-
-			/* In case bios or user sets triggering incorretly a pin
-			 * might remain in "interrupt triggered" state.
-			 */
-			if (looplimit++ > 32) {
-				dev_err(&vg->pdev->dev,
-					"Gpio %d interrupt flood, disabling\n",
-					base + pin);
-
-				reg = byt_gpio_reg(&vg->chip, base + pin,
-						   BYT_CONF0_REG);
-				mask = readl(reg);
-				mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS |
-					  BYT_TRIG_LVL);
-				writel(mask, reg);
-				mask = readl(reg); /* flush */
-				break;
-			}
 		}
 	}
 	chip->irq_eoi(data);
 }
 
+static void byt_irq_ack(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct byt_gpio *vg = to_byt_gpio(gc);
+	unsigned offset = irqd_to_hwirq(d);
+	void __iomem *reg;
+
+	reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
+	writel(BIT(offset % 32), reg);
+}
+
 static void byt_irq_unmask(struct irq_data *d)
 {
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct byt_gpio *vg = to_byt_gpio(gc);
+	unsigned offset = irqd_to_hwirq(d);
+	unsigned long flags;
+	void __iomem *reg;
+	u32 value;
+
+	spin_lock_irqsave(&vg->lock, flags);
+
+	reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+	value = readl(reg);
+
+	switch (irqd_get_trigger_type(d)) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		value |= BYT_TRIG_LVL;
+	case IRQ_TYPE_EDGE_RISING:
+		value |= BYT_TRIG_POS;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		value |= BYT_TRIG_LVL;
+	case IRQ_TYPE_EDGE_FALLING:
+		value |= BYT_TRIG_NEG;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
+		break;
+	}
+
+	writel(value, reg);
+
+	spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static void byt_irq_mask(struct irq_data *d)
 {
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct byt_gpio *vg = to_byt_gpio(gc);
+
+	byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
 }
 
 static struct irq_chip byt_irqchip = {
 	.name = "BYT-GPIO",
+	.irq_ack = byt_irq_ack,
 	.irq_mask = byt_irq_mask,
 	.irq_unmask = byt_irq_unmask,
 	.irq_set_type = byt_irq_type,
@@ -472,6 +510,21 @@
 {
 	void __iomem *reg;
 	u32 base, value;
+	int i;
+
+	/*
+	 * Clear interrupt triggers for all pins that are GPIOs and
+	 * do not use direct IRQ mode. This will prevent spurious
+	 * interrupts from misconfigured pins.
+	 */
+	for (i = 0; i < vg->chip.ngpio; i++) {
+		value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG));
+		if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
+		    !(value & BYT_DIRECT_IRQ_EN)) {
+			byt_gpio_clear_triggering(vg, i);
+			dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+		}
+	}
 
 	/* clear interrupt status trigger registers */
 	for (base = 0; base < vg->chip.ngpio; base += 32) {
@@ -541,6 +594,11 @@
 	gc->can_sleep = false;
 	gc->dev = dev;
 
+#ifdef CONFIG_PM_SLEEP
+	vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio,
+				       sizeof(*vg->saved_context), GFP_KERNEL);
+#endif
+
 	ret = gpiochip_add(gc);
 	if (ret) {
 		dev_err(&pdev->dev, "failed adding byt-gpio chip\n");
@@ -569,6 +627,69 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int byt_gpio_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct byt_gpio *vg = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < vg->chip.ngpio; i++) {
+		void __iomem *reg;
+		u32 value;
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+		value = readl(reg) & BYT_CONF0_RESTORE_MASK;
+		vg->saved_context[i].conf0 = value;
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+		value = readl(reg) & BYT_VAL_RESTORE_MASK;
+		vg->saved_context[i].val = value;
+	}
+
+	return 0;
+}
+
+static int byt_gpio_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct byt_gpio *vg = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < vg->chip.ngpio; i++) {
+		void __iomem *reg;
+		u32 value;
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+		value = readl(reg);
+		if ((value & BYT_CONF0_RESTORE_MASK) !=
+		     vg->saved_context[i].conf0) {
+			value &= ~BYT_CONF0_RESTORE_MASK;
+			value |= vg->saved_context[i].conf0;
+			writel(value, reg);
+			dev_info(dev, "restored pin %d conf0 %#08x", i, value);
+		}
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+		value = readl(reg);
+		if ((value & BYT_VAL_RESTORE_MASK) !=
+		     vg->saved_context[i].val) {
+			u32 v;
+
+			v = value & ~BYT_VAL_RESTORE_MASK;
+			v |= vg->saved_context[i].val;
+			if (v != value) {
+				writel(v, reg);
+				dev_dbg(dev, "restored pin %d val %#08x\n",
+					i, v);
+			}
+		}
+	}
+
+	return 0;
+}
+#endif
+
 static int byt_gpio_runtime_suspend(struct device *dev)
 {
 	return 0;
@@ -580,8 +701,9 @@
 }
 
 static const struct dev_pm_ops byt_gpio_pm_ops = {
-	.runtime_suspend = byt_gpio_runtime_suspend,
-	.runtime_resume = byt_gpio_runtime_resume,
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume)
+	SET_RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume,
+			   NULL)
 };
 
 static const struct acpi_device_id byt_gpio_acpi_match[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 3034fd0..82f691e 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1226,6 +1226,7 @@
 static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 				     int value)
 {
+	chv_gpio_set(chip, offset, value);
 	return pinctrl_gpio_direction_output(chip->base + offset);
 }
 
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index f4cd0b9..a481406 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1477,28 +1477,25 @@
 	/* the interrupt is already cleared before by reading ISR */
 }
 
-static unsigned int gpio_irq_startup(struct irq_data *d)
+static int gpio_irq_request_res(struct irq_data *d)
 {
 	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
 	unsigned	pin = d->hwirq;
 	int ret;
 
 	ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
-	if (ret) {
+	if (ret)
 		dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n",
 			d->hwirq);
-		return ret;
-	}
-	gpio_irq_unmask(d);
-	return 0;
+
+	return ret;
 }
 
-static void gpio_irq_shutdown(struct irq_data *d)
+static void gpio_irq_release_res(struct irq_data *d)
 {
 	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
 	unsigned	pin = d->hwirq;
 
-	gpio_irq_mask(d);
 	gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
 }
 
@@ -1577,8 +1574,8 @@
 static struct irq_chip gpio_irqchip = {
 	.name		= "GPIO",
 	.irq_ack	= gpio_irq_ack,
-	.irq_startup	= gpio_irq_startup,
-	.irq_shutdown	= gpio_irq_shutdown,
+	.irq_request_resources = gpio_irq_request_res,
+	.irq_release_resources = gpio_irq_release_res,
 	.irq_disable	= gpio_irq_mask,
 	.irq_mask	= gpio_irq_mask,
 	.irq_unmask	= gpio_irq_unmask,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 24c5d88..3c68a8e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1011,6 +1011,7 @@
 	.pins = sun4i_a10_pins,
 	.npins = ARRAY_SIZE(sun4i_a10_pins),
 	.irq_banks = 1,
+	.irq_read_needs_mux = true,
 };
 
 static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 3d07443..f8e171b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 
 #include "../core.h"
+#include "../../gpio/gpiolib.h"
 #include "pinctrl-sunxi.h"
 
 static struct irq_chip sunxi_pinctrl_edge_irq_chip;
@@ -464,10 +465,19 @@
 static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
-
 	u32 reg = sunxi_data_reg(offset);
 	u8 index = sunxi_data_offset(offset);
-	u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+	u32 set_mux = pctl->desc->irq_read_needs_mux &&
+			test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
+	u32 val;
+
+	if (set_mux)
+		sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT);
+
+	val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+
+	if (set_mux)
+		sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ);
 
 	return val;
 }
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 5a51523..e248e81 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -77,6 +77,9 @@
 #define IRQ_LEVEL_LOW		0x03
 #define IRQ_EDGE_BOTH		0x04
 
+#define SUN4I_FUNC_INPUT	0
+#define SUN4I_FUNC_IRQ		6
+
 struct sunxi_desc_function {
 	const char	*name;
 	u8		muxval;
@@ -94,6 +97,7 @@
 	int				npins;
 	unsigned			pin_base;
 	unsigned			irq_banks;
+	bool				irq_read_needs_mux;
 };
 
 struct sunxi_pinctrl_function {
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 638e7970..9752761 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -735,6 +735,31 @@
 	  functionality.  If in doubt, say Y here; it will only load on
 	  supported platforms.
 
+config INTEL_IMR
+	bool "Intel Isolated Memory Region support"
+	default n
+	depends on X86_INTEL_QUARK && IOSF_MBI
+	---help---
+	  This option provides a means to manipulate Isolated Memory Regions.
+	  IMRs are a set of registers that define read and write access masks
+	  to prohibit certain system agents from accessing memory with 1 KiB
+	  granularity.
+
+	  IMRs make it possible to control read/write access to an address
+	  by hardware agents inside the SoC. Read and write masks can be
+	  defined for:
+		- eSRAM flush
+		- Dirty CPU snoop (write only)
+		- RMU access
+		- PCI Virtual Channel 0/Virtual Channel 1
+		- SMM mode
+		- Non SMM mode
+
+	  Quark contains a set of eight IMR registers and makes use of those
+	  registers during its bootup process.
+
+	  If you are running on a Galileo/Quark, say Y here.
+
 config IBM_RTL
 	tristate "Device driver to enable PRTL support"
 	depends on X86 && PCI
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 782e822..f980ff7 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -179,8 +179,9 @@
 	/* check if the resource is already in use, skip if the
 	 * device is active because it itself may be in use */
 	if (!dev->active) {
-		if (__check_region(&ioport_resource, *port, length(port, end)))
+		if (!request_region(*port, length(port, end), "pnp"))
 			return 0;
+		release_region(*port, length(port, end));
 	}
 
 	/* check if the resource is reserved */
@@ -241,8 +242,9 @@
 	/* check if the resource is already in use, skip if the
 	 * device is active because it itself may be in use */
 	if (!dev->active) {
-		if (check_mem_region(*addr, length(addr, end)))
+		if (!request_mem_region(*addr, length(addr, end), "pnp"))
 			return 0;
+		release_mem_region(*addr, length(addr, end));
 	}
 
 	/* check if the resource is reserved */
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b899947..a4a8a6d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1839,10 +1839,12 @@
 	}
 
 	if (rdev->ena_pin) {
-		ret = regulator_ena_gpio_ctrl(rdev, true);
-		if (ret < 0)
-			return ret;
-		rdev->ena_gpio_state = 1;
+		if (!rdev->ena_gpio_state) {
+			ret = regulator_ena_gpio_ctrl(rdev, true);
+			if (ret < 0)
+				return ret;
+			rdev->ena_gpio_state = 1;
+		}
 	} else if (rdev->desc->ops->enable) {
 		ret = rdev->desc->ops->enable(rdev);
 		if (ret < 0)
@@ -1939,10 +1941,12 @@
 	trace_regulator_disable(rdev_get_name(rdev));
 
 	if (rdev->ena_pin) {
-		ret = regulator_ena_gpio_ctrl(rdev, false);
-		if (ret < 0)
-			return ret;
-		rdev->ena_gpio_state = 0;
+		if (rdev->ena_gpio_state) {
+			ret = regulator_ena_gpio_ctrl(rdev, false);
+			if (ret < 0)
+				return ret;
+			rdev->ena_gpio_state = 0;
+		}
 
 	} else if (rdev->desc->ops->disable) {
 		ret = rdev->desc->ops->disable(rdev);
@@ -3444,13 +3448,6 @@
 	if (attr == &dev_attr_requested_microamps.attr)
 		return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
 
-	/* all the other attributes exist to support constraints;
-	 * don't show them if there are no constraints, or if the
-	 * relevant supporting methods are missing.
-	 */
-	if (!rdev->constraints)
-		return 0;
-
 	/* constraints need specific supporting methods */
 	if (attr == &dev_attr_min_microvolts.attr ||
 	    attr == &dev_attr_max_microvolts.attr)
@@ -3633,12 +3630,6 @@
 				 config->ena_gpio, ret);
 			goto wash;
 		}
-
-		if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
-			rdev->ena_gpio_state = 1;
-
-		if (config->ena_gpio_invert)
-			rdev->ena_gpio_state = !rdev->ena_gpio_state;
 	}
 
 	/* set regulator constraints */
@@ -3807,9 +3798,11 @@
 	list_for_each_entry(rdev, &regulator_list, list) {
 		mutex_lock(&rdev->mutex);
 		if (rdev->use_count > 0  || rdev->constraints->always_on) {
-			error = _regulator_do_enable(rdev);
-			if (error)
-				ret = error;
+			if (!_regulator_is_enabled(rdev)) {
+				error = _regulator_do_enable(rdev);
+				if (error)
+					ret = error;
+			}
 		} else {
 			if (!have_full_constraints())
 				goto unlock;
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index bc61001..f0489cb 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -152,6 +152,15 @@
 	config.regmap = chip->regmap;
 	config.of_node = dev->of_node;
 
+	/* Mask all interrupt sources to deassert interrupt line */
+	error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0);
+	if (!error)
+		error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0);
+	if (error) {
+		dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error);
+		return error;
+	}
+
 	rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config);
 	if (IS_ERR(rdev)) {
 		dev_err(&i2c->dev, "Failed to register DA9210 regulator\n");
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 1f93b75..3fd4435 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -235,6 +235,7 @@
 		.vsel_mask = RK808_LDO_VSEL_MASK,
 		.enable_reg = RK808_LDO_EN_REG,
 		.enable_mask = BIT(0),
+		.enable_time = 400,
 		.owner = THIS_MODULE,
 	}, {
 		.name = "LDO_REG2",
@@ -249,6 +250,7 @@
 		.vsel_mask = RK808_LDO_VSEL_MASK,
 		.enable_reg = RK808_LDO_EN_REG,
 		.enable_mask = BIT(1),
+		.enable_time = 400,
 		.owner = THIS_MODULE,
 	}, {
 		.name = "LDO_REG3",
@@ -263,6 +265,7 @@
 		.vsel_mask = RK808_BUCK4_VSEL_MASK,
 		.enable_reg = RK808_LDO_EN_REG,
 		.enable_mask = BIT(2),
+		.enable_time = 400,
 		.owner = THIS_MODULE,
 	}, {
 		.name = "LDO_REG4",
@@ -277,6 +280,7 @@
 		.vsel_mask = RK808_LDO_VSEL_MASK,
 		.enable_reg = RK808_LDO_EN_REG,
 		.enable_mask = BIT(3),
+		.enable_time = 400,
 		.owner = THIS_MODULE,
 	}, {
 		.name = "LDO_REG5",
@@ -291,6 +295,7 @@
 		.vsel_mask = RK808_LDO_VSEL_MASK,
 		.enable_reg = RK808_LDO_EN_REG,
 		.enable_mask = BIT(4),
+		.enable_time = 400,
 		.owner = THIS_MODULE,
 	}, {
 		.name = "LDO_REG6",
@@ -305,6 +310,7 @@
 		.vsel_mask = RK808_LDO_VSEL_MASK,
 		.enable_reg = RK808_LDO_EN_REG,
 		.enable_mask = BIT(5),
+		.enable_time = 400,
 		.owner = THIS_MODULE,
 	}, {
 		.name = "LDO_REG7",
@@ -319,6 +325,7 @@
 		.vsel_mask = RK808_LDO_VSEL_MASK,
 		.enable_reg = RK808_LDO_EN_REG,
 		.enable_mask = BIT(6),
+		.enable_time = 400,
 		.owner = THIS_MODULE,
 	}, {
 		.name = "LDO_REG8",
@@ -333,6 +340,7 @@
 		.vsel_mask = RK808_LDO_VSEL_MASK,
 		.enable_reg = RK808_LDO_EN_REG,
 		.enable_mask = BIT(7),
+		.enable_time = 400,
 		.owner = THIS_MODULE,
 	}, {
 		.name = "SWITCH_REG1",
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index e2cffe0..fb991ec 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 92f6af6..73354ee 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -951,6 +951,7 @@
 	void *bufs_va;
 	int err = 0, i;
 	size_t total_buf_space;
+	bool notify;
 
 	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
 	if (!vrp)
@@ -1030,8 +1031,22 @@
 		}
 	}
 
+	/*
+	 * Prepare to kick but don't notify yet - we can't do this before
+	 * device is ready.
+	 */
+	notify = virtqueue_kick_prepare(vrp->rvq);
+
+	/* From this point on, we can notify and get callbacks. */
+	virtio_device_ready(vdev);
+
 	/* tell the remote processor it can start sending messages */
-	virtqueue_kick(vrp->rvq);
+	/*
+	 * this might be concurrent with callbacks, but we are only
+	 * doing notify, not a full kick here, so that's ok.
+	 */
+	if (notify)
+		virtqueue_notify(vrp->rvq);
 
 	dev_info(&vdev->dev, "rpmsg host is online\n");
 
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 70a5d94..b4f7744 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -31,6 +31,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/suspend.h>
 #include <linux/uaccess.h>
 
 #include "rtc-at91rm9200.h"
@@ -54,6 +55,10 @@
 static int irq;
 static DEFINE_SPINLOCK(at91_rtc_lock);
 static u32 at91_rtc_shadow_imr;
+static bool suspended;
+static DEFINE_SPINLOCK(suspended_lock);
+static unsigned long cached_events;
+static u32 at91_rtc_imr;
 
 static void at91_rtc_write_ier(u32 mask)
 {
@@ -290,7 +295,9 @@
 	struct rtc_device *rtc = platform_get_drvdata(pdev);
 	unsigned int rtsr;
 	unsigned long events = 0;
+	int ret = IRQ_NONE;
 
+	spin_lock(&suspended_lock);
 	rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
 	if (rtsr) {		/* this interrupt is shared!  Is it ours? */
 		if (rtsr & AT91_RTC_ALARM)
@@ -304,14 +311,22 @@
 
 		at91_rtc_write(AT91_RTC_SCCR, rtsr);	/* clear status reg */
 
-		rtc_update_irq(rtc, 1, events);
+		if (!suspended) {
+			rtc_update_irq(rtc, 1, events);
 
-		dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__,
-			events >> 8, events & 0x000000FF);
+			dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n",
+				__func__, events >> 8, events & 0x000000FF);
+		} else {
+			cached_events |= events;
+			at91_rtc_write_idr(at91_rtc_imr);
+			pm_system_wakeup();
+		}
 
-		return IRQ_HANDLED;
+		ret = IRQ_HANDLED;
 	}
-	return IRQ_NONE;		/* not handled */
+	spin_unlock(&suspended_lock);
+
+	return ret;
 }
 
 static const struct at91_rtc_config at91rm9200_config = {
@@ -401,8 +416,8 @@
 					AT91_RTC_CALEV);
 
 	ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt,
-				IRQF_SHARED,
-				"at91_rtc", pdev);
+			       IRQF_SHARED | IRQF_COND_SUSPEND,
+			       "at91_rtc", pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
 		return ret;
@@ -454,8 +469,6 @@
 
 /* AT91RM9200 RTC Power management control */
 
-static u32 at91_rtc_imr;
-
 static int at91_rtc_suspend(struct device *dev)
 {
 	/* this IRQ is shared with DBGU and other hardware which isn't
@@ -464,21 +477,42 @@
 	at91_rtc_imr = at91_rtc_read_imr()
 			& (AT91_RTC_ALARM|AT91_RTC_SECEV);
 	if (at91_rtc_imr) {
-		if (device_may_wakeup(dev))
+		if (device_may_wakeup(dev)) {
+			unsigned long flags;
+
 			enable_irq_wake(irq);
-		else
+
+			spin_lock_irqsave(&suspended_lock, flags);
+			suspended = true;
+			spin_unlock_irqrestore(&suspended_lock, flags);
+		} else {
 			at91_rtc_write_idr(at91_rtc_imr);
+		}
 	}
 	return 0;
 }
 
 static int at91_rtc_resume(struct device *dev)
 {
+	struct rtc_device *rtc = dev_get_drvdata(dev);
+
 	if (at91_rtc_imr) {
-		if (device_may_wakeup(dev))
+		if (device_may_wakeup(dev)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&suspended_lock, flags);
+
+			if (cached_events) {
+				rtc_update_irq(rtc, 1, cached_events);
+				cached_events = 0;
+			}
+
+			suspended = false;
+			spin_unlock_irqrestore(&suspended_lock, flags);
+
 			disable_irq_wake(irq);
-		else
-			at91_rtc_write_ier(at91_rtc_imr);
+		}
+		at91_rtc_write_ier(at91_rtc_imr);
 	}
 	return 0;
 }
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 2183fd2..5ccaee3 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
+#include <linux/suspend.h>
 #include <linux/clk.h>
 
 /*
@@ -77,6 +78,9 @@
 	unsigned int		gpbr_offset;
 	int 			irq;
 	struct clk		*sclk;
+	bool			suspended;
+	unsigned long		events;
+	spinlock_t		lock;
 };
 
 #define rtt_readl(rtc, field) \
@@ -271,14 +275,9 @@
 	return 0;
 }
 
-/*
- * IRQ handler for the RTC
- */
-static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
+static irqreturn_t at91_rtc_cache_events(struct sam9_rtc *rtc)
 {
-	struct sam9_rtc *rtc = _rtc;
 	u32 sr, mr;
-	unsigned long events = 0;
 
 	/* Shared interrupt may be for another device.  Note: reading
 	 * SR clears it, so we must only read it in this irq handler!
@@ -290,20 +289,56 @@
 
 	/* alarm status */
 	if (sr & AT91_RTT_ALMS)
-		events |= (RTC_AF | RTC_IRQF);
+		rtc->events |= (RTC_AF | RTC_IRQF);
 
 	/* timer update/increment */
 	if (sr & AT91_RTT_RTTINC)
-		events |= (RTC_UF | RTC_IRQF);
-
-	rtc_update_irq(rtc->rtcdev, 1, events);
-
-	pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
-		events >> 8, events & 0x000000FF);
+		rtc->events |= (RTC_UF | RTC_IRQF);
 
 	return IRQ_HANDLED;
 }
 
+static void at91_rtc_flush_events(struct sam9_rtc *rtc)
+{
+	if (!rtc->events)
+		return;
+
+	rtc_update_irq(rtc->rtcdev, 1, rtc->events);
+	rtc->events = 0;
+
+	pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
+		rtc->events >> 8, rtc->events & 0x000000FF);
+}
+
+/*
+ * IRQ handler for the RTC
+ */
+static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
+{
+	struct sam9_rtc *rtc = _rtc;
+	int ret;
+
+	spin_lock(&rtc->lock);
+
+	ret = at91_rtc_cache_events(rtc);
+
+	/* We're called in suspended state */
+	if (rtc->suspended) {
+		/* Mask irqs coming from this peripheral */
+		rtt_writel(rtc, MR,
+			   rtt_readl(rtc, MR) &
+			   ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
+		/* Trigger a system wakeup */
+		pm_system_wakeup();
+	} else {
+		at91_rtc_flush_events(rtc);
+	}
+
+	spin_unlock(&rtc->lock);
+
+	return ret;
+}
+
 static const struct rtc_class_ops at91_rtc_ops = {
 	.read_time	= at91_rtc_readtime,
 	.set_time	= at91_rtc_settime,
@@ -421,7 +456,8 @@
 
 	/* register irq handler after we know what name we'll use */
 	ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt,
-				IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc);
+			       IRQF_SHARED | IRQF_COND_SUSPEND,
+			       dev_name(&rtc->rtcdev->dev), rtc);
 	if (ret) {
 		dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
 		return ret;
@@ -482,7 +518,12 @@
 	rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
 	if (rtc->imr) {
 		if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) {
+			unsigned long flags;
+
 			enable_irq_wake(rtc->irq);
+			spin_lock_irqsave(&rtc->lock, flags);
+			rtc->suspended = true;
+			spin_unlock_irqrestore(&rtc->lock, flags);
 			/* don't let RTTINC cause wakeups */
 			if (mr & AT91_RTT_RTTINCIEN)
 				rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
@@ -499,10 +540,18 @@
 	u32		mr;
 
 	if (rtc->imr) {
+		unsigned long flags;
+
 		if (device_may_wakeup(dev))
 			disable_irq_wake(rtc->irq);
 		mr = rtt_readl(rtc, MR);
 		rtt_writel(rtc, MR, mr | rtc->imr);
+
+		spin_lock_irqsave(&rtc->lock, flags);
+		rtc->suspended = false;
+		at91_rtc_cache_events(rtc);
+		at91_rtc_flush_events(rtc);
+		spin_unlock_irqrestore(&rtc->lock, flags);
 	}
 
 	return 0;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 8c3bfcb..803869c 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -399,21 +399,21 @@
 	 * of this RTC chip.  We check for it anyways in case support is
 	 * added in the future.
 	 */
-	if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+	if (unlikely(seconds >= 0xc0))
 		alrm->time.tm_sec = -1;
 	else
 		alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds,
 						       RTC_SECS_BCD_MASK,
 						       RTC_SECS_BIN_MASK);
 
-	if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+	if (unlikely(minutes >= 0xc0))
 		alrm->time.tm_min = -1;
 	else
 		alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes,
 						       RTC_MINS_BCD_MASK,
 						       RTC_MINS_BIN_MASK);
 
-	if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+	if (unlikely(hours >= 0xc0))
 		alrm->time.tm_hour = -1;
 	else
 		alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours,
@@ -472,13 +472,13 @@
 	 * field, and we only support four fields.  We put the support
 	 * here anyways for the future.
 	 */
-	if (unlikely((seconds >= 0xc0) && (seconds <= 0xff)))
+	if (unlikely(seconds >= 0xc0))
 		seconds = 0xff;
 
-	if (unlikely((minutes >= 0xc0) && (minutes <= 0xff)))
+	if (unlikely(minutes >= 0xc0))
 		minutes = 0xff;
 
-	if (unlikely((hours >= 0xc0) && (hours <= 0xff)))
+	if (unlikely(hours >= 0xc0))
 		hours = 0xff;
 
 	alrm->time.tm_mon	= -1;
@@ -528,7 +528,6 @@
 /* ----------------------------------------------------------------------- */
 /* /dev/rtcX Interface functions */
 
-#ifdef CONFIG_RTC_INTF_DEV
 /**
  * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off.
  * @dev: pointer to device structure.
@@ -557,7 +556,6 @@
 
 	return 0;
 }
-#endif
 /* ----------------------------------------------------------------------- */
 
 
@@ -1612,7 +1610,7 @@
 		ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false);
 
 	/* Make sure we actually matched something. */
-	if (!bcd_reg_info && !bin_reg_info)
+	if (!bcd_reg_info || !bin_reg_info)
 		return -EINVAL;
 
 	/* bcd_reg_info->reg == bin_reg_info->reg. */
@@ -1650,7 +1648,7 @@
 		return -EINVAL;
 
 	/* Make sure we actually matched something. */
-	if (!bcd_reg_info && !bin_reg_info)
+	if (!bcd_reg_info || !bin_reg_info)
 		return -EINVAL;
 
 	/* Check for a valid range. */
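
The alarm hunks above drop the "<= 0xff" half of each test because seconds, minutes and hours are 8-bit values, so that half can never be false. A standalone sketch confirming the simplification accepts exactly the same inputs:

/*
 * Standalone sketch, not part of the patch: both forms match the same
 * 64 values (0xc0..0xff) for an 8-bit input.
 */
#include <stdio.h>

int main(void)
{
	unsigned int v, hit_old = 0, hit_new = 0;

	for (v = 0; v <= 0xff; v++) {
		unsigned char seconds = (unsigned char)v;

		if (seconds >= 0xc0 && seconds <= 0xff)
			hit_old++;
		if (seconds >= 0xc0)
			hit_new++;
	}

	printf("old=%u new=%u\n", hit_old, hit_new);	/* both print 64 */
	return 0;
}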
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4241eea..f4cf685 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -849,6 +849,7 @@
 
 static struct s3c_rtc_data const s3c6410_rtc_data = {
 	.max_user_freq		= 32768,
+	.needs_src_clk		= true,
 	.irq_handler		= s3c6410_rtc_irq,
 	.set_freq		= s3c6410_rtc_setfreq,
 	.enable_tick		= s3c6410_rtc_enable_tick,
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 96128cb..da21281 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -547,7 +547,7 @@
 	 * parse input
 	 */
 	num_of_segments = 0;
-	for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
+	for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
 		for (j = i; (buf[j] != ':') &&
 			(buf[j] != '\0') &&
 			(buf[j] != '\n') &&
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 09db452..7497ddde 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -92,7 +92,7 @@
 			add = 0;
 			continue;
 		}
-		for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
+		for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
 			if (clusters_intersect(req, iter->request[pos]) &&
 			    (rq_data_dir(req) == WRITE ||
 			     rq_data_dir(iter->request[pos]) == WRITE)) {
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index aa3e2c7..a6f5ee8 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -178,12 +178,6 @@
 			break;
 		cpu_relax();
 	}
-	if (resid > 1) {
-		/* FIFO not cleared */
-		shost_printk(KERN_INFO, esp->host,
-			     "FIFO not cleared, %d bytes left\n",
-			     resid);
-	}
 
 	/*
 	 * When there is a residual BCMPLT will never be set
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 96241b2..a7cc618 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -585,7 +585,6 @@
 			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
 		return NULL;
 	}
-	shost->dma_boundary = pcidev->dma_mask;
 	shost->max_id = BE2_MAX_SESSIONS;
 	shost->max_channel = 0;
 	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 95d581c..a1cfbd3 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -6831,10 +6831,8 @@
 						char *name)
 {
 	struct workqueue_struct *wq = NULL;
-	char wq_name[20];
 
-	snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr);
-	wq = alloc_ordered_workqueue(wq_name, 0);
+	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
 	if (!wq)
 		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
 
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 62b58d3..60de662 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -500,6 +500,7 @@
 	struct sas_discovery_event *ev = to_sas_discovery_event(work);
 	struct asd_sas_port *port = ev->port;
 	struct sas_ha_struct *ha = port->ha;
+	struct domain_device *ddev = port->port_dev;
 
 	/* prevent revalidation from finding sata links in recovery */
 	mutex_lock(&ha->disco_mutex);
@@ -514,8 +515,9 @@
 	SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
 		    task_pid_nr(current));
 
-	if (port->port_dev)
-		res = sas_ex_revalidate_domain(port->port_dev);
+	if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+		     ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
+		res = sas_ex_revalidate_domain(ddev);
 
 	SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
 		    port->id, task_pid_nr(current), res);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 73f9fee..99f43b7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1570,9 +1570,7 @@
 	 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
 	 */
 	memset(&port_name, 0, 36);
-	snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-		fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
-		fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+	snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
 	/*
 	 * Locate our struct se_node_acl either from an explict NodeACL created
 	 * via ConfigFS, or via running in TPG demo mode.
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0cbc1fb..2270bd5 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -546,7 +546,7 @@
 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 {
 	sg_io_hdr_t *hp = &srp->header;
-	int err = 0;
+	int err = 0, err2;
 	int len;
 
 	if (count < SZ_SG_IO_HDR) {
@@ -575,8 +575,8 @@
 		goto err_out;
 	}
 err_out:
-	err = sg_finish_rem_req(srp);
-	return (0 == err) ? count : err;
+	err2 = sg_finish_rem_req(srp);
+	return err ? : err2 ? : count;
 }
 
 static ssize_t
@@ -1335,6 +1335,17 @@
 	}
 	/* Rely on write phase to clean out srp status values, so no "else" */
 
+	/*
+	 * Free the request as soon as it is complete so that its resources
+	 * can be reused without waiting for userspace to read() the
+	 * result.  But keep the associated bio (if any) around until
+	 * blk_rq_unmap_user() can be called from user context.
+	 */
+	srp->rq = NULL;
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
+	__blk_put_request(rq->q, rq);
+
 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (unlikely(srp->orphan)) {
 		if (sfp->keep_orphan)
@@ -1669,7 +1680,22 @@
 			return -ENOMEM;
 	}
 
-	rq = blk_get_request(q, rw, GFP_ATOMIC);
+	/*
+	 * NOTE
+	 *
+	 * With scsi-mq enabled, there are a fixed number of preallocated
+	 * requests equal in number to shost->can_queue.  If all of the
+	 * preallocated requests are already in use, then using GFP_ATOMIC with
+	 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
+	 * will cause blk_get_request() to sleep until an active command
+	 * completes, freeing up a request.  Neither option is ideal, but
+	 * GFP_KERNEL is the better choice to prevent userspace from getting an
+	 * unexpected EWOULDBLOCK.
+	 *
+	 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
+	 * does not sleep except under memory pressure.
+	 */
+	rq = blk_get_request(q, rw, GFP_KERNEL);
 	if (IS_ERR(rq)) {
 		kfree(long_cmdp);
 		return PTR_ERR(rq);
@@ -1759,10 +1785,10 @@
 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
 				      "sg_finish_rem_req: res_used=%d\n",
 				      (int) srp->res_used));
-	if (srp->rq) {
-		if (srp->bio)
-			ret = blk_rq_unmap_user(srp->bio);
+	if (srp->bio)
+		ret = blk_rq_unmap_user(srp->bio);
 
+	if (srp->rq) {
 		if (srp->rq->cmd != srp->rq->__cmd)
 			kfree(srp->rq->cmd);
 		blk_put_request(srp->rq);
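
The sg_new_read() hunk above reports the first of the read error and the cleanup error, and the byte count only when both are zero, via the GNU "? :" form. A standalone sketch of that operator (helper name and values are made up):

/*
 * Standalone sketch, not part of the patch: "a ? : b" yields a when a is
 * non-zero and b otherwise, evaluating a only once.
 */
#include <stdio.h>

static long finish(int err, int err2, long count)
{
	return err ? : err2 ? : count;	/* GCC/Clang extension */
}

int main(void)
{
	printf("%ld\n", finish(0, 0, 42));	/* 42: success, report the byte count */
	printf("%ld\n", finish(-5, 0, 42));	/* -5: the read error wins */
	printf("%ld\n", finish(0, -7, 42));	/* -7: the cleanup error is not lost */
	return 0;
}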
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 7702664..289ad01 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -870,6 +870,7 @@
 }
 
 static struct scsi_host_template wd719x_template = {
+	.module				= THIS_MODULE,
 	.name				= "Western Digital 719x",
 	.queuecommand			= wd719x_queuecommand,
 	.eh_abort_handler		= wd719x_abort,
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index f3ee439d..cd4c293 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -81,7 +81,9 @@
 		if (!of_machine_is_compatible("renesas,emev2") &&
 		    !of_machine_is_compatible("renesas,r7s72100") &&
 		    !of_machine_is_compatible("renesas,r8a73a4") &&
+#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
 		    !of_machine_is_compatible("renesas,r8a7740") &&
+#endif
 		    !of_machine_is_compatible("renesas,r8a7778") &&
 		    !of_machine_is_compatible("renesas,r8a7779") &&
 		    !of_machine_is_compatible("renesas,r8a7790") &&
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 9af7841..06de340 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -764,17 +764,17 @@
 			(unsigned long long)xfer->rx_dma);
 	}
 
-	/* REVISIT: We're waiting for ENDRX before we start the next
+	/* REVISIT: We're waiting for RXBUFF before we start the next
 	 * transfer because we need to handle some difficult timing
-	 * issues otherwise. If we wait for ENDTX in one transfer and
-	 * then starts waiting for ENDRX in the next, it's difficult
-	 * to tell the difference between the ENDRX interrupt we're
-	 * actually waiting for and the ENDRX interrupt of the
+	 * issues otherwise. If we wait for TXBUFE in one transfer and
+	 * then start waiting for RXBUFF in the next, it's difficult
+	 * to tell the difference between the RXBUFF interrupt we're
+	 * actually waiting for and the RXBUFF interrupt of the
 	 * previous transfer.
 	 *
 	 * It should be doable, though. Just not now...
 	 */
-	spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
+	spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
 	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
 }
 
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index a0197fd..3ce39d1 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -139,6 +139,9 @@
 				1,
 				DMA_MEM_TO_DEV,
 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!txdesc)
+		return NULL;
+
 	txdesc->callback = dw_spi_dma_tx_done;
 	txdesc->callback_param = dws;
 
@@ -184,6 +187,9 @@
 				1,
 				DMA_DEV_TO_MEM,
 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!rxdesc)
+		return NULL;
+
 	rxdesc->callback = dw_spi_dma_rx_done;
 	rxdesc->callback_param = dws;
 
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 5ba3310..6d331e0 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -36,13 +36,13 @@
 
 static struct spi_pci_desc spi_pci_mid_desc_1 = {
 	.setup = dw_spi_mid_init,
-	.num_cs = 32,
+	.num_cs = 5,
 	.bus_num = 0,
 };
 
 static struct spi_pci_desc spi_pci_mid_desc_2 = {
 	.setup = dw_spi_mid_init,
-	.num_cs = 4,
+	.num_cs = 2,
 	.bus_num = 1,
 };
 
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 5a97a62..4847afb 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -621,14 +621,14 @@
 	if (!dws->fifo_len) {
 		u32 fifo;
 
-		for (fifo = 2; fifo <= 256; fifo++) {
+		for (fifo = 1; fifo < 256; fifo++) {
 			dw_writew(dws, DW_SPI_TXFLTR, fifo);
 			if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
 				break;
 		}
 		dw_writew(dws, DW_SPI_TXFLTR, 0);
 
-		dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
+		dws->fifo_len = (fifo == 1) ? 0 : fifo;
 		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
 	}
 }
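
The spi-dw hunk above starts probing at 1 and keeps the last threshold value that read back intact, so the detected fifo_len is no longer one short. A standalone simulation of the probe under an assumed register model (DEPTH and the write behaviour are illustration-only assumptions):

/*
 * Standalone simulation, not part of the patch: model a TX FIFO threshold
 * field that holds 0..DEPTH-1 and lets larger writes read back changed --
 * the property the probe loop relies on.
 */
#include <stdio.h>

#define DEPTH 16

static unsigned int txfltr;

static void reg_write(unsigned int v)
{
	txfltr = (v < DEPTH) ? v : 0;	/* out-of-range writes do not stick */
}

static unsigned int reg_read(void)
{
	return txfltr;
}

int main(void)
{
	unsigned int fifo, fifo_len;

	for (fifo = 1; fifo < 256; fifo++) {
		reg_write(fifo);
		if (fifo != reg_read())
			break;
	}
	reg_write(0);

	/* the old loop started at 2 and stored fifo - 1, reporting 15 here */
	fifo_len = (fifo == 1) ? 0 : fifo;
	printf("detected FIFO depth: %u\n", fifo_len);	/* 16 */
	return 0;
}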
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index c01567d..e649bc7 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -459,6 +459,13 @@
 	unsigned long flags;
 	int ret;
 
+	if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
+		dev_err(spfi->dev,
+			"Transfer length (%d) is greater than the max supported (%d)",
+			xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
+		return -EINVAL;
+	}
+
 	/*
 	 * Stop all DMA and reset the controller if the previous transaction
 	 * timed-out and never completed it's DMA.
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 89ca162..ee513a8 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -534,12 +534,12 @@
 	pl022->cur_msg = NULL;
 	pl022->cur_transfer = NULL;
 	pl022->cur_chip = NULL;
-	spi_finalize_current_message(pl022->master);
 
 	/* disable the SPI/SSP operation */
 	writew((readw(SSP_CR1(pl022->virtbase)) &
 		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
 
+	spi_finalize_current_message(pl022->master);
 }
 
 /**
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 884a716..5c06168 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -101,6 +101,7 @@
 #define QSPI_FLEN(n)			((n - 1) << 0)
 
 /* STATUS REGISTER */
+#define BUSY				0x01
 #define WC				0x02
 
 /* INTERRUPT REGISTER */
@@ -199,6 +200,21 @@
 	ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
 }
 
+static inline u32 qspi_is_busy(struct ti_qspi *qspi)
+{
+	u32 stat;
+	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
+
+	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+	while ((stat & BUSY) && time_after(timeout, jiffies)) {
+		cpu_relax();
+		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+	}
+
+	WARN(stat & BUSY, "qspi busy\n");
+	return stat & BUSY;
+}
+
 static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 {
 	int wlen, count;
@@ -211,6 +227,9 @@
 	wlen = t->bits_per_word >> 3;	/* in bytes */
 
 	while (count) {
+		if (qspi_is_busy(qspi))
+			return -EBUSY;
+
 		switch (wlen) {
 		case 1:
 			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
@@ -266,6 +285,9 @@
 
 	while (count) {
 		dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
+		if (qspi_is_busy(qspi))
+			return -EBUSY;
+
 		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
 		if (!wait_for_completion_timeout(&qspi->transfer_complete,
 						 QSPI_COMPLETION_TIMEOUT)) {
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 9800c01..3f72451 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -426,7 +426,6 @@
 				unsigned int *data)
 {
 	struct pci1710_private *devpriv = dev->private;
-	unsigned int chan = CR_CHAN(insn->chanspec);
 	int ret = 0;
 	int i;
 
@@ -447,7 +446,7 @@
 		if (ret)
 			break;
 
-		ret = pci171x_ai_read_sample(dev, s, chan, &val);
+		ret = pci171x_ai_read_sample(dev, s, 0, &val);
 		if (ret)
 			break;
 
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c
index dbdea71..e856f01 100644
--- a/drivers/staging/comedi/drivers/comedi_isadma.c
+++ b/drivers/staging/comedi/drivers/comedi_isadma.c
@@ -91,9 +91,10 @@
 			stalled++;
 			if (stalled > 10)
 				break;
+		} else {
+			residue = new_residue;
+			stalled = 0;
 		}
-		residue = new_residue;
-		stalled = 0;
 	}
 	return residue;
 }
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index e371183..a090668 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -103,11 +103,6 @@
 	VMK8061_MODEL
 };
 
-struct firmware_version {
-	unsigned char ic3_vers[32];	/* USB-Controller */
-	unsigned char ic6_vers[32];	/* CPU */
-};
-
 static const struct comedi_lrange vmk8061_range = {
 	2, {
 		UNI_RANGE(5),
@@ -156,68 +151,12 @@
 struct vmk80xx_private {
 	struct usb_endpoint_descriptor *ep_rx;
 	struct usb_endpoint_descriptor *ep_tx;
-	struct firmware_version fw;
 	struct semaphore limit_sem;
 	unsigned char *usb_rx_buf;
 	unsigned char *usb_tx_buf;
 	enum vmk80xx_model model;
 };
 
-static int vmk80xx_check_data_link(struct comedi_device *dev)
-{
-	struct vmk80xx_private *devpriv = dev->private;
-	struct usb_device *usb = comedi_to_usb_dev(dev);
-	unsigned int tx_pipe;
-	unsigned int rx_pipe;
-	unsigned char tx[1];
-	unsigned char rx[2];
-
-	tx_pipe = usb_sndbulkpipe(usb, 0x01);
-	rx_pipe = usb_rcvbulkpipe(usb, 0x81);
-
-	tx[0] = VMK8061_CMD_RD_PWR_STAT;
-
-	/*
-	 * Check that IC6 (PIC16F871) is powered and
-	 * running and the data link between IC3 and
-	 * IC6 is working properly
-	 */
-	usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval);
-	usb_bulk_msg(usb, rx_pipe, rx, 2, NULL, HZ * 10);
-
-	return (int)rx[1];
-}
-
-static void vmk80xx_read_eeprom(struct comedi_device *dev, int flag)
-{
-	struct vmk80xx_private *devpriv = dev->private;
-	struct usb_device *usb = comedi_to_usb_dev(dev);
-	unsigned int tx_pipe;
-	unsigned int rx_pipe;
-	unsigned char tx[1];
-	unsigned char rx[64];
-	int cnt;
-
-	tx_pipe = usb_sndbulkpipe(usb, 0x01);
-	rx_pipe = usb_rcvbulkpipe(usb, 0x81);
-
-	tx[0] = VMK8061_CMD_RD_VERSION;
-
-	/*
-	 * Read the firmware version info of IC3 and
-	 * IC6 from the internal EEPROM of the IC
-	 */
-	usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval);
-	usb_bulk_msg(usb, rx_pipe, rx, 64, &cnt, HZ * 10);
-
-	rx[cnt] = '\0';
-
-	if (flag & IC3_VERSION)
-		strncpy(devpriv->fw.ic3_vers, rx + 1, 24);
-	else			/* IC6_VERSION */
-		strncpy(devpriv->fw.ic6_vers, rx + 25, 24);
-}
-
 static void vmk80xx_do_bulk_msg(struct comedi_device *dev)
 {
 	struct vmk80xx_private *devpriv = dev->private;
@@ -878,16 +817,6 @@
 
 	usb_set_intfdata(intf, devpriv);
 
-	if (devpriv->model == VMK8061_MODEL) {
-		vmk80xx_read_eeprom(dev, IC3_VERSION);
-		dev_info(&intf->dev, "%s\n", devpriv->fw.ic3_vers);
-
-		if (vmk80xx_check_data_link(dev)) {
-			vmk80xx_read_eeprom(dev, IC6_VERSION);
-			dev_info(&intf->dev, "%s\n", devpriv->fw.ic6_vers);
-		}
-	}
-
 	if (devpriv->model == VMK8055_MODEL)
 		vmk80xx_reset_device(dev);
 
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index d9d6fad..8161743 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -214,11 +214,17 @@
 	unsigned long		is_divided;
 
 	/*
-	 * Touchscreen LRADC channels receives a private slot in the CTRL4
-	 * register, the slot #7. Therefore only 7 slots instead of 8 in the
-	 * CTRL4 register can be mapped to LRADC channels when using the
-	 * touchscreen.
-	 *
+	 * When the touchscreen is enabled, we give it two private virtual
+	 * channels: #6 and #7. This means that only 6 virtual channels (instead
+	 * of 8) will be available for buffered capture.
+	 */
+#define TOUCHSCREEN_VCHANNEL1		7
+#define TOUCHSCREEN_VCHANNEL2		6
+#define BUFFER_VCHANS_LIMITED		0x3f
+#define BUFFER_VCHANS_ALL		0xff
+	u8			buffer_vchans;
+
+	/*
 	 * Furthermore, certain LRADC channels are shared between touchscreen
 	 * and/or touch-buttons and generic LRADC block. Therefore when using
 	 * either of these, these channels are not available for the regular
@@ -342,6 +348,9 @@
 #define	LRADC_CTRL4				0x140
 #define	LRADC_CTRL4_LRADCSELECT_MASK(n)		(0xf << ((n) * 4))
 #define	LRADC_CTRL4_LRADCSELECT_OFFSET(n)	((n) * 4)
+#define	LRADC_CTRL4_LRADCSELECT(n, x) \
+				(((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
+				LRADC_CTRL4_LRADCSELECT_MASK(n))
 
 #define LRADC_RESOLUTION			12
 #define LRADC_SINGLE_SAMPLE_MASK		((1 << LRADC_RESOLUTION) - 1)
@@ -416,6 +425,14 @@
 					LRADC_STATUS_TOUCH_DETECT_RAW);
 }
 
+static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch,
+				  unsigned ch)
+{
+	mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch),
+				LRADC_CTRL4);
+	mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4);
+}
+
 static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
 {
 	/*
@@ -450,12 +467,8 @@
 		LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
 			LRADC_DELAY(3));
 
-	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
-			LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
-			LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
+	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1);
 
-	/* wake us again, when the complete conversion is done */
-	mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1);
 	/*
 	 * after changing the touchscreen plates setting
 	 * the signals need some initial time to settle. Start the
@@ -509,12 +522,8 @@
 		LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
 					LRADC_DELAY(3));
 
-	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
-			LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
-			LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
+	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1);
 
-	/* wake us again, when the conversions are done */
-	mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1);
 	/*
 	 * after changing the touchscreen plates setting
 	 * the signals need some initial time to settle. Start the
@@ -580,36 +589,6 @@
 #define TS_CH_XM 4
 #define TS_CH_YM 5
 
-static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc)
-{
-	u32 reg;
-	int val;
-
-	reg = readl(lradc->base + LRADC_CTRL1);
-
-	/* only channels 3 to 5 are of interest here */
-	if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) {
-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) |
-			LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1);
-		val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP);
-	} else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) {
-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) |
-			LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1);
-		val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM);
-	} else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) {
-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) |
-			LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1);
-		val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM);
-	} else {
-		return -EIO;
-	}
-
-	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
-	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
-
-	return val;
-}
-
 /*
  * YP(open)--+-------------+
  *           |             |--+
@@ -653,7 +632,8 @@
 	mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);
 
 	lradc->cur_plate = LRADC_SAMPLE_X;
-	mxs_lradc_setup_ts_channel(lradc, TS_CH_YP);
+	mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP);
+	mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
 }
 
 /*
@@ -674,7 +654,8 @@
 	mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);
 
 	lradc->cur_plate = LRADC_SAMPLE_Y;
-	mxs_lradc_setup_ts_channel(lradc, TS_CH_XM);
+	mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM);
+	mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
 }
 
 /*
@@ -695,7 +676,10 @@
 	mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);
 
 	lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
-	mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
+	mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM);
+	mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP);
+	mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2,
+						TOUCHSCREEN_VCHANNEL1);
 }
 
 static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
@@ -708,6 +692,19 @@
 	mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
 }
 
+static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc)
+{
+	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
+				LRADC_CTRL1);
+	mxs_lradc_reg_set(lradc,
+		LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
+	/*
+	 * start with the Y-pos, because it uses nearly the same plate
+	 * settings like the touch detection
+	 */
+	mxs_lradc_prepare_y_pos(lradc);
+}
+
 static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
 {
 	input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
@@ -725,10 +722,12 @@
 	 * start a dummy conversion to burn time to settle the signals
 	 * note: we are not interested in the conversion's value
 	 */
-	mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5));
-	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
-	mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1);
-	mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) |
+	mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1));
+	mxs_lradc_reg_clear(lradc,
+		LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
+		LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
+	mxs_lradc_reg_wrt(lradc,
+		LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) |
 		LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
 			LRADC_DELAY(2));
 }
@@ -760,59 +759,45 @@
 
 	/* if it is released, wait for the next touch via IRQ */
 	lradc->cur_plate = LRADC_TOUCH;
-	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
+	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
+	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
+	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
+		LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
+		LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
 	mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
 }
 
 /* touchscreen's state machine */
 static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
 {
-	int val;
-
 	switch (lradc->cur_plate) {
 	case LRADC_TOUCH:
-		/*
-		 * start with the Y-pos, because it uses nearly the same plate
-		 * settings like the touch detection
-		 */
-		if (mxs_lradc_check_touch_event(lradc)) {
-			mxs_lradc_reg_clear(lradc,
-					LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
-					LRADC_CTRL1);
-			mxs_lradc_prepare_y_pos(lradc);
-		}
+		if (mxs_lradc_check_touch_event(lradc))
+			mxs_lradc_start_touch_event(lradc);
 		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
 					LRADC_CTRL1);
 		return;
 
 	case LRADC_SAMPLE_Y:
-		val = mxs_lradc_read_ts_channel(lradc);
-		if (val < 0) {
-			mxs_lradc_enable_touch_detection(lradc); /* re-start */
-			return;
-		}
-		lradc->ts_y_pos = val;
+		lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc,
+							TOUCHSCREEN_VCHANNEL1);
 		mxs_lradc_prepare_x_pos(lradc);
 		return;
 
 	case LRADC_SAMPLE_X:
-		val = mxs_lradc_read_ts_channel(lradc);
-		if (val < 0) {
-			mxs_lradc_enable_touch_detection(lradc); /* re-start */
-			return;
-		}
-		lradc->ts_x_pos = val;
+		lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc,
+							TOUCHSCREEN_VCHANNEL1);
 		mxs_lradc_prepare_pressure(lradc);
 		return;
 
 	case LRADC_SAMPLE_PRESSURE:
-		lradc->ts_pressure =
-			mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
+		lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc,
+							TOUCHSCREEN_VCHANNEL2,
+							TOUCHSCREEN_VCHANNEL1);
 		mxs_lradc_complete_touch_event(lradc);
 		return;
 
 	case LRADC_SAMPLE_VALID:
-		val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */
 		mxs_lradc_finish_touch_event(lradc, 1);
 		break;
 	}
@@ -844,9 +829,9 @@
 	 * used if doing raw sampling.
 	 */
 	if (lradc->soc == IMX28_LRADC)
-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
+		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0),
 			LRADC_CTRL1);
-	mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
+	mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0);
 
 	/* Enable / disable the divider per requirement */
 	if (test_bit(chan, &lradc->is_divided))
@@ -1090,9 +1075,8 @@
 {
 	/* stop all interrupts from firing */
 	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
-		LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) |
-		LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5),
-		LRADC_CTRL1);
+		LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
+		LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
 
 	/* Power-down touchscreen touch-detect circuitry. */
 	mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
@@ -1158,26 +1142,31 @@
 	struct iio_dev *iio = data;
 	struct mxs_lradc *lradc = iio_priv(iio);
 	unsigned long reg = readl(lradc->base + LRADC_CTRL1);
+	uint32_t clr_irq = mxs_lradc_irq_mask(lradc);
 	const uint32_t ts_irq_mask =
 		LRADC_CTRL1_TOUCH_DETECT_IRQ |
-		LRADC_CTRL1_LRADC_IRQ(2) |
-		LRADC_CTRL1_LRADC_IRQ(3) |
-		LRADC_CTRL1_LRADC_IRQ(4) |
-		LRADC_CTRL1_LRADC_IRQ(5);
+		LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
+		LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2);
 
 	if (!(reg & mxs_lradc_irq_mask(lradc)))
 		return IRQ_NONE;
 
-	if (lradc->use_touchscreen && (reg & ts_irq_mask))
+	if (lradc->use_touchscreen && (reg & ts_irq_mask)) {
 		mxs_lradc_handle_touch(lradc);
 
-	if (iio_buffer_enabled(iio))
-		iio_trigger_poll(iio->trig);
-	else if (reg & LRADC_CTRL1_LRADC_IRQ(0))
-		complete(&lradc->completion);
+		/* Make sure we don't clear the next conversion's interrupt. */
+		clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
+				LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2));
+	}
 
-	mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc),
-			LRADC_CTRL1);
+	if (iio_buffer_enabled(iio)) {
+		if (reg & lradc->buffer_vchans)
+			iio_trigger_poll(iio->trig);
+	} else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) {
+		complete(&lradc->completion);
+	}
+
+	mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1);
 
 	return IRQ_HANDLED;
 }
@@ -1289,9 +1278,10 @@
 	}
 
 	if (lradc->soc == IMX28_LRADC)
-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
-							LRADC_CTRL1);
-	mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
+		mxs_lradc_reg_clear(lradc,
+			lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
+			LRADC_CTRL1);
+	mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
 
 	for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
 		ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
@@ -1324,10 +1314,11 @@
 	mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
 					LRADC_DELAY_KICK, LRADC_DELAY(0));
 
-	mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
+	mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
 	if (lradc->soc == IMX28_LRADC)
-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
-					LRADC_CTRL1);
+		mxs_lradc_reg_clear(lradc,
+			lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
+			LRADC_CTRL1);
 
 	kfree(lradc->buffer);
 	mutex_unlock(&lradc->lock);
@@ -1353,7 +1344,7 @@
 	if (lradc->use_touchbutton)
 		rsvd_chans++;
 	if (lradc->use_touchscreen)
-		rsvd_chans++;
+		rsvd_chans += 2;
 
 	/* Test for attempts to map channels with special mode of operation. */
 	if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
@@ -1413,6 +1404,13 @@
 		.channel = 8,
 		.scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
 	},
+	/* Hidden channel to keep indexes */
+	{
+		.type = IIO_TEMP,
+		.indexed = 1,
+		.scan_index = -1,
+		.channel = 9,
+	},
 	MXS_ADC_CHAN(10, IIO_VOLTAGE),	/* VDDIO */
 	MXS_ADC_CHAN(11, IIO_VOLTAGE),	/* VTH */
 	MXS_ADC_CHAN(12, IIO_VOLTAGE),	/* VDDA */
@@ -1583,6 +1581,11 @@
 
 	touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
 
+	if (touch_ret == 0)
+		lradc->buffer_vchans = BUFFER_VCHANS_LIMITED;
+	else
+		lradc->buffer_vchans = BUFFER_VCHANS_ALL;
+
 	/* Grab all IRQ sources */
 	for (i = 0; i < of_cfg->irq_count; i++) {
 		lradc->irq[i] = platform_get_irq(pdev, i);
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 017d2f8..c17893b 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/gpio.h>
 #include <linux/module.h>
+#include <linux/bitops.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -68,7 +69,7 @@
 		break;
 	case IIO_ANGL_VEL:
 		vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
-		vel = (vel << 4) >> 4;
+		vel = sign_extend32(vel, 11);
 		*val = vel;
 		break;
 	default:
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 88614b7..ddf1fa9 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -270,7 +270,7 @@
 
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
 			    struct lookup_intent *it,
-			    struct dentry *de)
+			    struct inode *inode)
 {
 	int rc = 0;
 
@@ -280,19 +280,17 @@
 	if (it_disposition(it, DISP_LOOKUP_NEG))
 		return -ENOENT;
 
-	rc = ll_prep_inode(&de->d_inode, request, NULL, it);
+	rc = ll_prep_inode(&inode, request, NULL, it);
 
 	return rc;
 }
 
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
 {
 	LASSERT(it != NULL);
-	LASSERT(dentry != NULL);
 
-	if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) {
-		struct inode *inode = dentry->d_inode;
-		struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
+	if (it->d.lustre.it_lock_mode && inode != NULL) {
+		struct ll_sb_info *sbi = ll_i2sbi(inode);
 
 		CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
 		       inode, inode->i_ino, inode->i_generation);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 7c7ef7e..5ebee6c 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -2912,8 +2912,8 @@
 			oit.it_op = IT_LOOKUP;
 
 		/* Call getattr by fid, so do not provide name at all. */
-		op_data = ll_prep_md_op_data(NULL, dentry->d_inode,
-					     dentry->d_inode, NULL, 0, 0,
+		op_data = ll_prep_md_op_data(NULL, inode,
+					     inode, NULL, 0, 0,
 					     LUSTRE_OPC_ANY, NULL);
 		if (IS_ERR(op_data))
 			return PTR_ERR(op_data);
@@ -2931,7 +2931,7 @@
 			goto out;
 		}
 
-		rc = ll_revalidate_it_finish(req, &oit, dentry);
+		rc = ll_revalidate_it_finish(req, &oit, inode);
 		if (rc != 0) {
 			ll_intent_release(&oit);
 			goto out;
@@ -2944,7 +2944,7 @@
 		if (!dentry->d_inode->i_nlink)
 			d_lustre_invalidate(dentry, 0);
 
-		ll_lookup_finish_locks(&oit, dentry);
+		ll_lookup_finish_locks(&oit, inode);
 	} else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
 		struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
 		u64 valid = OBD_MD_FLGETATTR;
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index d032c2b..2af1d72 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -786,9 +786,9 @@
 void ll_intent_drop_lock(struct lookup_intent *);
 void ll_intent_release(struct lookup_intent *);
 void ll_invalidate_aliases(struct inode *);
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode);
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
-			    struct lookup_intent *it, struct dentry *de);
+			    struct lookup_intent *it, struct inode *inode);
 
 /* llite/llite_lib.c */
 extern struct super_operations lustre_super_operations;
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 4f361b7..890ac19 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -481,6 +481,7 @@
 	struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
 	struct dentry *save = dentry, *retval;
 	struct ptlrpc_request *req = NULL;
+	struct inode *inode;
 	struct md_op_data *op_data;
 	__u32 opc;
 	int rc;
@@ -539,12 +540,13 @@
 		goto out;
 	}
 
-	if ((it->it_op & IT_OPEN) && dentry->d_inode &&
-	    !S_ISREG(dentry->d_inode->i_mode) &&
-	    !S_ISDIR(dentry->d_inode->i_mode)) {
-		ll_release_openhandle(dentry->d_inode, it);
+	inode = dentry->d_inode;
+	if ((it->it_op & IT_OPEN) && inode &&
+	    !S_ISREG(inode->i_mode) &&
+	    !S_ISDIR(inode->i_mode)) {
+		ll_release_openhandle(inode, it);
 	}
-	ll_lookup_finish_locks(it, dentry);
+	ll_lookup_finish_locks(it, inode);
 
 	if (dentry == save)
 		retval = NULL;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index aebde32..50bad55 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -30,7 +30,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
@@ -45,7 +45,7 @@
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
 #include "iscsi_target_device.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #include <target/iscsi/iscsi_transport.h>
 
@@ -968,11 +968,7 @@
 
 	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-		spin_lock_bh(&conn->sess->ttt_lock);
-		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
-			cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-		spin_unlock_bh(&conn->sess->ttt_lock);
+		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
 	} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
 		cmd->targ_xfer_tag = 0xFFFFFFFF;
 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
@@ -1998,6 +1994,7 @@
 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
 	cmd->data_direction	= DMA_NONE;
+	cmd->text_in_ptr	= NULL;
 
 	return 0;
 }
@@ -2011,9 +2008,13 @@
 	int cmdsn_ret;
 
 	if (!text_in) {
-		pr_err("Unable to locate text_in buffer for sendtargets"
-		       " discovery\n");
-		goto reject;
+		cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
+		if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
+			pr_err("Unable to locate text_in buffer for sendtargets"
+			       " discovery\n");
+			goto reject;
+		}
+		goto empty_sendtargets;
 	}
 	if (strncmp("SendTargets", text_in, 11) != 0) {
 		pr_err("Received Text Data that is not"
@@ -2040,6 +2041,7 @@
 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
+empty_sendtargets:
 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -3047,11 +3049,7 @@
 	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
 			(struct scsi_lun *)&hdr->lun);
 	hdr->itt		= cmd->init_task_tag;
-	spin_lock_bh(&conn->sess->ttt_lock);
-	r2t->targ_xfer_tag	= conn->sess->targ_xfer_tag++;
-	if (r2t->targ_xfer_tag == 0xFFFFFFFF)
-		r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-	spin_unlock_bh(&conn->sess->ttt_lock);
+	r2t->targ_xfer_tag	= session_get_next_ttt(conn->sess);
 	hdr->ttt		= cpu_to_be32(r2t->targ_xfer_tag);
 	hdr->statsn		= cpu_to_be32(conn->stat_sn);
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3393,7 +3391,8 @@
 
 static int
 iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
-				  enum iscsit_transport_type network_transport)
+				  enum iscsit_transport_type network_transport,
+				  int skip_bytes, bool *completed)
 {
 	char *payload = NULL;
 	struct iscsi_conn *conn = cmd->conn;
@@ -3405,7 +3404,7 @@
 	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
 	unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
 
-	buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
+	buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
 			 SENDTARGETS_BUF_LIMIT);
 
 	payload = kzalloc(buffer_len, GFP_KERNEL);
@@ -3484,9 +3483,16 @@
 						end_of_buf = 1;
 						goto eob;
 					}
-					memcpy(payload + payload_len, buf, len);
-					payload_len += len;
-					target_name_printed = 1;
+
+					if (skip_bytes && len <= skip_bytes) {
+						skip_bytes -= len;
+					} else {
+						memcpy(payload + payload_len, buf, len);
+						payload_len += len;
+						target_name_printed = 1;
+						if (len > skip_bytes)
+							skip_bytes = 0;
+					}
 				}
 
 				len = sprintf(buf, "TargetAddress="
@@ -3502,15 +3508,24 @@
 					end_of_buf = 1;
 					goto eob;
 				}
-				memcpy(payload + payload_len, buf, len);
-				payload_len += len;
+
+				if (skip_bytes && len <= skip_bytes) {
+					skip_bytes -= len;
+				} else {
+					memcpy(payload + payload_len, buf, len);
+					payload_len += len;
+					if (len > skip_bytes)
+						skip_bytes = 0;
+				}
 			}
 			spin_unlock(&tpg->tpg_np_lock);
 		}
 		spin_unlock(&tiqn->tiqn_tpg_lock);
 eob:
-		if (end_of_buf)
+		if (end_of_buf) {
+			*completed = false;
 			break;
+		}
 
 		if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
 			break;
@@ -3528,13 +3543,23 @@
 		      enum iscsit_transport_type network_transport)
 {
 	int text_length, padding;
+	bool completed = true;
 
-	text_length = iscsit_build_sendtargets_response(cmd, network_transport);
+	text_length = iscsit_build_sendtargets_response(cmd, network_transport,
+							cmd->read_data_done,
+							&completed);
 	if (text_length < 0)
 		return text_length;
 
+	if (completed) {
+		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+	} else {
+		hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+		cmd->read_data_done += text_length;
+		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+			cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+	}
 	hdr->opcode = ISCSI_OP_TEXT_RSP;
-	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 	padding = ((-text_length) & 3);
 	hton24(hdr->dlength, text_length);
 	hdr->itt = cmd->init_task_tag;
@@ -3543,21 +3568,25 @@
 	hdr->statsn = cpu_to_be32(cmd->stat_sn);
 
 	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	/*
+	 * Reset maxcmdsn_inc in multi-part text payload exchanges to
+	 * correctly increment MaxCmdSN for each response answering a
+	 * non immediate text request with a valid CmdSN.
+	 */
+	cmd->maxcmdsn_inc = 0;
 	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
 	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
 
-	pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
-		" Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
-		text_length, conn->cid);
+	pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
+		" Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
+		cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
+		!!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
+		!!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
 
 	return text_length + padding;
 }
 EXPORT_SYMBOL(iscsit_build_text_rsp);
 
-/*
- *	FIXME: Add support for F_BIT and C_BIT when the length is longer than
- *	MaxRecvDataSegmentLength.
- */
 static int iscsit_send_text_rsp(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn)
@@ -4021,9 +4050,15 @@
 		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
-		if (!cmd)
-			goto reject;
+		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+			if (!cmd)
+				goto reject;
+		} else {
+			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+			if (!cmd)
+				goto reject;
+		}
 
 		ret = iscsit_handle_text_cmd(conn, cmd, buf);
 		break;
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index ab4915c..47e249d 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 9059c1e..48384b6 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -28,7 +28,7 @@
 #include <target/configfs_macros.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_erl0.h"
@@ -36,7 +36,7 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_configfs.h"
 
 struct target_fabric_configfs *lio_target_fabric_configfs;
@@ -674,12 +674,9 @@
 		rb += sprintf(page+rb, "InitiatorAlias: %s\n",
 			sess->sess_ops->InitiatorAlias);
 
-		rb += sprintf(page+rb, "LIO Session ID: %u   "
-			"ISID: 0x%02x %02x %02x %02x %02x %02x  "
-			"TSIH: %hu  ", sess->sid,
-			sess->isid[0], sess->isid[1], sess->isid[2],
-			sess->isid[3], sess->isid[4], sess->isid[5],
-			sess->tsih);
+		rb += sprintf(page+rb,
+			      "LIO Session ID: %u   ISID: 0x%6ph  TSIH: %hu  ",
+			      sess->sid, sess->isid, sess->tsih);
 		rb += sprintf(page+rb, "SessionType: %s\n",
 				(sess->sess_ops->SessionType) ?
 				"Discovery" : "Normal");
@@ -1758,9 +1755,7 @@
 	/*
 	 * iSCSI Initiator Session Identifier from RFC-3720.
 	 */
-	return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
-		sess->isid[0], sess->isid[1], sess->isid[2],
-		sess->isid[3], sess->isid[4], sess->isid[5]);
+	return snprintf(buf, size, "%6phN", sess->isid);
 }
 
 static int lio_queue_data_in(struct se_cmd *se_cmd)
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index e93d5a7..fb3b52b 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -18,7 +18,7 @@
 
 #include <scsi/iscsi_proto.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_erl1.h"
 #include "iscsi_target_util.h"
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 7087c73..34c3cd1 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index a0ae5fc..1c197ba 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -21,7 +21,8 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_transport.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
 #include "iscsi_target_erl0.h"
@@ -939,7 +940,8 @@
 
 	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
 		spin_unlock_bh(&conn->state_lock);
-		iscsit_close_connection(conn);
+		if (conn->conn_transport->transport_type == ISCSI_TCP)
+			iscsit_close_connection(conn);
 		return;
 	}
 
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index cda4d80..2e561de 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 4ca8fd2..e24f1c7 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target_erl0.h"
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 713c0c1..153fb66 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -24,14 +24,14 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_nego.h"
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_erl2.h"
 #include "iscsi_target_login.h"
-#include "iscsi_target_stat.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 62a095f..8c02fa3 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nego.h"
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 16454a9..208cca8 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -18,7 +18,7 @@
 
 #include <target/target_core_base.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 18c2926..d4f9e96 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -18,7 +18,7 @@
 
 #include <linux/slab.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_parameters.h"
 
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index ca41b58..e446a09 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 1033955..5e1349a 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -23,12 +23,12 @@
 #include <target/target_core_base.h>
 #include <target/configfs_macros.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #ifndef INITIAL_JIFFIES
 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 78404b1..b0224a7 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -23,7 +23,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 9053a3c..bdd127c 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -20,7 +20,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nodeattrib.h"
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 601e9cc..26aa509 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -20,40 +20,26 @@
 #include <linux/list.h>
 #include <linux/bitmap.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target.h"
 
-static LIST_HEAD(active_ts_list);
 static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(active_ts_lock);
 static DEFINE_SPINLOCK(inactive_ts_lock);
 static DEFINE_SPINLOCK(ts_bitmap_lock);
 
-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
-{
-	spin_lock(&active_ts_lock);
-	list_add_tail(&ts->ts_list, &active_ts_list);
-	iscsit_global->active_ts++;
-	spin_unlock(&active_ts_lock);
-}
-
 static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
 {
+	if (!list_empty(&ts->ts_list)) {
+		WARN_ON(1);
+		return;
+	}
 	spin_lock(&inactive_ts_lock);
 	list_add_tail(&ts->ts_list, &inactive_ts_list);
 	iscsit_global->inactive_ts++;
 	spin_unlock(&inactive_ts_lock);
 }
 
-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
-{
-	spin_lock(&active_ts_lock);
-	list_del(&ts->ts_list);
-	iscsit_global->active_ts--;
-	spin_unlock(&active_ts_lock);
-}
-
 static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 {
 	struct iscsi_thread_set *ts;
@@ -66,7 +52,7 @@
 
 	ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
 
-	list_del(&ts->ts_list);
+	list_del_init(&ts->ts_list);
 	iscsit_global->inactive_ts--;
 	spin_unlock(&inactive_ts_lock);
 
@@ -204,8 +190,6 @@
 
 void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
 {
-	iscsi_add_ts_to_active_list(ts);
-
 	spin_lock_bh(&ts->ts_state_lock);
 	conn->thread_set = ts;
 	ts->conn = conn;
@@ -397,7 +381,6 @@
 
 	if (ts->delay_inactive && (--ts->thread_count == 0)) {
 		spin_unlock_bh(&ts->ts_state_lock);
-		iscsi_del_ts_from_active_list(ts);
 
 		if (!iscsit_global->in_shutdown)
 			iscsi_deallocate_extra_thread_sets();
@@ -452,7 +435,6 @@
 
 	if (ts->delay_inactive && (--ts->thread_count == 0)) {
 		spin_unlock_bh(&ts->ts_state_lock);
-		iscsi_del_ts_from_active_list(ts);
 
 		if (!iscsit_global->in_shutdown)
 			iscsi_deallocate_extra_thread_sets();
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index bcd88ec..390df8e 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -25,7 +25,7 @@
 #include <target/target_core_configfs.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
@@ -390,6 +390,7 @@
 			init_task_tag, conn->cid);
 	return NULL;
 }
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
 
 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
 	struct iscsi_conn *conn,
@@ -939,13 +940,8 @@
 	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
 				ISTATE_SEND_NOPIN_NO_RESPONSE;
 	cmd->init_task_tag = RESERVED_ITT;
-	spin_lock_bh(&conn->sess->ttt_lock);
-	cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
-			0xFFFFFFFF;
-	if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
-		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-	spin_unlock_bh(&conn->sess->ttt_lock);
-
+	cmd->targ_xfer_tag = (want_response) ?
+			     session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
 	spin_lock_bh(&conn->cmd_lock);
 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index a68508c..1ab754a 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -16,7 +16,6 @@
 extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 			       unsigned char * ,__be32 cmdsn);
 extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
 			itt_t, u32);
 extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d836de2..44620fb 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -494,6 +494,11 @@
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
 		return 0;
 	}
+	if (cmd->prot_op) {
+		pr_err("WRITE_SAME: Protection information with FILEIO"
+		       " backends not supported\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
 	sg = &cmd->t_data_sg[0];
 
 	if (cmd->t_data_nents > 1 ||
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 78346b8..d4a4b0f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -464,6 +464,11 @@
 	sector_t block_lba = cmd->t_task_lba;
 	sector_t sectors = sbc_get_write_same_sectors(cmd);
 
+	if (cmd->prot_op) {
+		pr_err("WRITE_SAME: Protection information with IBLOCK"
+		       " backends not supported\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
 	sg = &cmd->t_data_sg[0];
 
 	if (cmd->t_data_nents > 1 ||
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 283cf78..2de6fb8 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1874,8 +1874,8 @@
 		}
 
 		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-			pr_err("Unable to update renaming"
-				" APTPL metadata\n");
+			pr_err("Unable to update renaming APTPL metadata,"
+			       " reallocating larger buffer\n");
 			ret = -EMSGSIZE;
 			goto out;
 		}
@@ -1892,8 +1892,8 @@
 			lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
 
 		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-			pr_err("Unable to update renaming"
-				" APTPL metadata\n");
+			pr_err("Unable to update renaming APTPL metadata,"
+			       " reallocating larger buffer\n");
 			ret = -EMSGSIZE;
 			goto out;
 		}
@@ -1956,7 +1956,7 @@
 static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
 {
 	unsigned char *buf;
-	int rc;
+	int rc, len = PR_APTPL_BUF_LEN;
 
 	if (!aptpl) {
 		char *null_buf = "No Registrations or Reservations\n";
@@ -1970,25 +1970,26 @@
 
 		return 0;
 	}
-
-	buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
+retry:
+	buf = vzalloc(len);
 	if (!buf)
 		return TCM_OUT_OF_RESOURCES;
 
-	rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
+	rc = core_scsi3_update_aptpl_buf(dev, buf, len);
 	if (rc < 0) {
-		kfree(buf);
-		return TCM_OUT_OF_RESOURCES;
+		vfree(buf);
+		len *= 2;
+		goto retry;
 	}
 
 	rc = __core_scsi3_write_aptpl_to_file(dev, buf);
 	if (rc != 0) {
 		pr_err("SPC-3 PR: Could not update APTPL\n");
-		kfree(buf);
+		vfree(buf);
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
 	dev->t10_pr.pr_aptpl_active = 1;
-	kfree(buf);
+	vfree(buf);
 	pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
 	return 0;
 }
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index cd4bed7..9a2f9d3 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -37,6 +37,9 @@
 #include "target_core_alua.h"
 
 static sense_reason_t
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+
+static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
@@ -251,7 +254,10 @@
 static sense_reason_t
 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 {
+	struct se_device *dev = cmd->se_dev;
+	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
 	unsigned int sectors = sbc_get_write_same_sectors(cmd);
+	sense_reason_t ret;
 
 	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
 		pr_err("WRITE_SAME PBDATA and LBDATA"
@@ -264,6 +270,16 @@
 			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
 		return TCM_INVALID_CDB_FIELD;
 	}
+	/*
+	 * Sanity check for LBA wrap and request past end of device.
+	 */
+	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+	    ((cmd->t_task_lba + sectors) > end_lba)) {
+		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
+		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
+		return TCM_ADDRESS_OUT_OF_RANGE;
+	}
+
 	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
 	if (flags[0] & 0x10) {
 		pr_warn("WRITE SAME with ANCHOR not supported\n");
@@ -277,12 +293,21 @@
 		if (!ops->execute_write_same_unmap)
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+		if (!dev->dev_attrib.emulate_tpws) {
+			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
+			       " has emulate_tpws disabled\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
 		cmd->execute_cmd = ops->execute_write_same_unmap;
 		return 0;
 	}
 	if (!ops->execute_write_same)
 		return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+	if (ret)
+		return ret;
+
 	cmd->execute_cmd = ops->execute_write_same;
 	return 0;
 }
@@ -614,14 +639,21 @@
 	return 0;
 }
 
-static bool
+static sense_reason_t
 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 	       u32 sectors, bool is_write)
 {
 	u8 protect = cdb[1] >> 5;
 
-	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
-		return true;
+	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
+		if (protect && !dev->dev_attrib.pi_prot_type) {
+			pr_err("CDB contains protect bit, but device does not"
+			       " advertise PROTECT=1 feature bit\n");
+			return TCM_INVALID_CDB_FIELD;
+		}
+		if (cmd->prot_pto)
+			return TCM_NO_SENSE;
+	}
 
 	switch (dev->dev_attrib.pi_prot_type) {
 	case TARGET_DIF_TYPE3_PROT:
@@ -629,7 +661,7 @@
 		break;
 	case TARGET_DIF_TYPE2_PROT:
 		if (protect)
-			return false;
+			return TCM_INVALID_CDB_FIELD;
 
 		cmd->reftag_seed = cmd->t_task_lba;
 		break;
@@ -638,12 +670,12 @@
 		break;
 	case TARGET_DIF_TYPE0_PROT:
 	default:
-		return true;
+		return TCM_NO_SENSE;
 	}
 
 	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
 				   is_write, cmd))
-		return false;
+		return TCM_INVALID_CDB_FIELD;
 
 	cmd->prot_type = dev->dev_attrib.pi_prot_type;
 	cmd->prot_length = dev->prot_length * sectors;
@@ -662,7 +694,30 @@
 		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
 		 cmd->prot_op, cmd->prot_checks);
 
-	return true;
+	return TCM_NO_SENSE;
+}
+
+static int
+sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+	if (cdb[1] & 0x10) {
+		if (!dev->dev_attrib.emulate_dpo) {
+			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
+			       " does not advertise support for DPO\n", cdb[0]);
+			return -EINVAL;
+		}
+	}
+	if (cdb[1] & 0x8) {
+		if (!dev->dev_attrib.emulate_fua_write ||
+		    !dev->dev_attrib.emulate_write_cache) {
+			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
+			       " does not advertise support for FUA write\n",
+			       cdb[0]);
+			return -EINVAL;
+		}
+		cmd->se_cmd_flags |= SCF_FUA;
+	}
+	return 0;
 }
 
 sense_reason_t
@@ -686,8 +741,12 @@
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		if (ret)
+			return ret;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
@@ -697,8 +756,12 @@
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		if (ret)
+			return ret;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
@@ -708,8 +771,12 @@
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		if (ret)
+			return ret;
 
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
@@ -727,11 +794,13 @@
 		sectors = transport_get_sectors_10(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
 
-		if (cdb[1] & 0x8)
-			cmd->se_cmd_flags |= SCF_FUA;
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		if (ret)
+			return ret;
+
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -740,11 +809,13 @@
 		sectors = transport_get_sectors_12(cdb);
 		cmd->t_task_lba = transport_lba_32(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
 
-		if (cdb[1] & 0x8)
-			cmd->se_cmd_flags |= SCF_FUA;
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		if (ret)
+			return ret;
+
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -753,11 +824,13 @@
 		sectors = transport_get_sectors_16(cdb);
 		cmd->t_task_lba = transport_lba_64(cdb);
 
-		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
 
-		if (cdb[1] & 0x8)
-			cmd->se_cmd_flags |= SCF_FUA;
+		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		if (ret)
+			return ret;
+
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
@@ -768,6 +841,9 @@
 			return TCM_INVALID_CDB_FIELD;
 		sectors = transport_get_sectors_10(cdb);
 
+		if (sbc_check_dpofua(dev, cmd, cdb))
+			return TCM_INVALID_CDB_FIELD;
+
 		cmd->t_task_lba = transport_lba_32(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 
@@ -777,8 +853,6 @@
 		cmd->execute_rw = ops->execute_rw;
 		cmd->execute_cmd = sbc_execute_rw;
 		cmd->transport_complete_callback = &xdreadwrite_callback;
-		if (cdb[1] & 0x8)
-			cmd->se_cmd_flags |= SCF_FUA;
 		break;
 	case VARIABLE_LENGTH_CMD:
 	{
@@ -787,6 +861,8 @@
 		case XDWRITEREAD_32:
 			sectors = transport_get_sectors_32(cdb);
 
+			if (sbc_check_dpofua(dev, cmd, cdb))
+				return TCM_INVALID_CDB_FIELD;
 			/*
 			 * Use WRITE_32 and READ_32 opcodes for the emulated
 			 * XDWRITE_READ_32 logic.
@@ -801,8 +877,6 @@
 			cmd->execute_rw = ops->execute_rw;
 			cmd->execute_cmd = sbc_execute_rw;
 			cmd->transport_complete_callback = &xdreadwrite_callback;
-			if (cdb[1] & 0x8)
-				cmd->se_cmd_flags |= SCF_FUA;
 			break;
 		case WRITE_SAME_32:
 			sectors = transport_get_sectors_32(cdb);
@@ -888,6 +962,11 @@
 		if (!ops->execute_unmap)
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+		if (!dev->dev_attrib.emulate_tpu) {
+			pr_err("Got UNMAP, but backend device has"
+			       " emulate_tpu disabled\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
 		size = get_unaligned_be16(&cdb[7]);
 		cmd->execute_cmd = ops->execute_unmap;
 		break;
@@ -955,7 +1034,8 @@
 		unsigned long long end_lba;
 check_lba:
 		end_lba = dev->transport->get_blocks(dev) + 1;
-		if (cmd->t_task_lba + sectors > end_lba) {
+		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+		    ((cmd->t_task_lba + sectors) > end_lba)) {
 			pr_err("cmd exceeds last lba %llu "
 				"(lba %llu, sectors %u)\n",
 				end_lba, cmd->t_task_lba, sectors);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 4c71657..460e931 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -647,7 +647,7 @@
 	 * support the use of the WRITE SAME (16) command to unmap LBAs.
 	 */
 	if (dev->dev_attrib.emulate_tpws != 0)
-		buf[5] |= 0x40;
+		buf[5] |= 0x40 | 0x20;
 
 	return 0;
 }
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 25d244c..031018e 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -262,13 +262,12 @@
 	result = acpi_parse_art(priv->adev->handle, &priv->art_count,
 				&priv->arts, true);
 	if (result)
-		goto free_priv;
-
+		dev_dbg(&pdev->dev, "_ART table parsing error\n");
 
 	result = acpi_parse_trt(priv->adev->handle, &priv->trt_count,
 				&priv->trts, true);
 	if (result)
-		goto free_art;
+		dev_dbg(&pdev->dev, "_TRT table parsing error\n");
 
 	platform_set_drvdata(pdev, priv);
 
@@ -281,7 +280,7 @@
 						&int3400_thermal_params, 0, 0);
 	if (IS_ERR(priv->thermal)) {
 		result = PTR_ERR(priv->thermal);
-		goto free_trt;
+		goto free_art_trt;
 	}
 
 	priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
@@ -295,9 +294,8 @@
 
 free_zone:
 	thermal_zone_device_unregister(priv->thermal);
-free_trt:
+free_art_trt:
 	kfree(priv->trts);
-free_art:
 	kfree(priv->arts);
 free_priv:
 	kfree(priv);
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
index f88b088..1e25133 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -208,7 +208,7 @@
 				trip_cnt, GFP_KERNEL);
 		if (!int34x_thermal_zone->aux_trips) {
 			ret = -ENOMEM;
-			goto free_mem;
+			goto err_trip_alloc;
 		}
 		trip_mask = BIT(trip_cnt) - 1;
 		int34x_thermal_zone->aux_trip_nr = trip_cnt;
@@ -248,14 +248,15 @@
 						0, 0);
 	if (IS_ERR(int34x_thermal_zone->zone)) {
 		ret = PTR_ERR(int34x_thermal_zone->zone);
-		goto free_lpat;
+		goto err_thermal_zone;
 	}
 
 	return int34x_thermal_zone;
 
-free_lpat:
+err_thermal_zone:
 	acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
-free_mem:
+	kfree(int34x_thermal_zone->aux_trips);
+err_trip_alloc:
 	kfree(int34x_thermal_zone);
 	return ERR_PTR(ret);
 }
@@ -266,6 +267,7 @@
 {
 	thermal_zone_device_unregister(int34x_thermal_zone->zone);
 	acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+	kfree(int34x_thermal_zone->aux_trips);
 	kfree(int34x_thermal_zone);
 }
 EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 6ceebd6..12623bc 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -688,6 +688,7 @@
 	{ X86_VENDOR_INTEL, 6, 0x45},
 	{ X86_VENDOR_INTEL, 6, 0x46},
 	{ X86_VENDOR_INTEL, 6, 0x4c},
+	{ X86_VENDOR_INTEL, 6, 0x4d},
 	{ X86_VENDOR_INTEL, 6, 0x56},
 	{}
 };
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 2580a48..fe4e767 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -387,21 +387,9 @@
 
 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (irq) {
-		int ret;
-
 		/*
 		 * platform has IRQ support.
 		 * Then, driver uses common registers
-		 */
-
-		ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
-				       dev_name(dev), common);
-		if (ret) {
-			dev_err(dev, "irq request failed\n ");
-			return ret;
-		}
-
-		/*
 		 * rcar_has_irq_support() will be enabled
 		 */
 		res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
@@ -456,8 +444,16 @@
 	}
 
 	/* enable temperature comparison */
-	if (irq)
+	if (irq) {
+		ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
+				       dev_name(dev), common);
+		if (ret) {
+			dev_err(dev, "irq request failed\n ");
+			goto error_unregister;
+		}
+
 		rcar_thermal_common_write(common, ENR, enr_bits);
+	}
 
 	platform_set_drvdata(pdev, common);
 
@@ -467,9 +463,9 @@
 
 error_unregister:
 	rcar_thermal_for_each_priv(priv, common) {
-		thermal_zone_device_unregister(priv->zone);
 		if (rcar_has_irq_support(priv))
 			rcar_thermal_irq_disable(priv);
+		thermal_zone_device_unregister(priv->zone);
 	}
 
 	pm_runtime_put(dev);
@@ -485,9 +481,9 @@
 	struct rcar_thermal_priv *priv;
 
 	rcar_thermal_for_each_priv(priv, common) {
-		thermal_zone_device_unregister(priv->zone);
 		if (rcar_has_irq_support(priv))
 			rcar_thermal_irq_disable(priv);
+		thermal_zone_device_unregister(priv->zone);
 	}
 
 	pm_runtime_put(dev);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index fbeedc0..1d30b09 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -682,6 +682,7 @@
 
 	if (on) {
 		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
+		con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
 		interrupt_en =
 			(of_thermal_is_trip_valid(tz, 7)
 			<< EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
@@ -704,9 +705,9 @@
 			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
 	} else {
 		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+		con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
 		interrupt_en = 0; /* Disable all interrupts */
 	}
-	con |= 1 << EXYNOS7_PD_DET_EN_SHIFT;
 
 	writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
 	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
@@ -716,7 +717,7 @@
 {
 	struct exynos_tmu_data *data = p;
 
-	if (!data)
+	if (!data || !data->tmu_read)
 		return -EINVAL;
 
 	mutex_lock(&data->lock);
@@ -918,34 +919,16 @@
 }
 
 static const struct of_device_id exynos_tmu_match[] = {
-	{
-		.compatible = "samsung,exynos3250-tmu",
-	},
-	{
-		.compatible = "samsung,exynos4210-tmu",
-	},
-	{
-		.compatible = "samsung,exynos4412-tmu",
-	},
-	{
-		.compatible = "samsung,exynos5250-tmu",
-	},
-	{
-		.compatible = "samsung,exynos5260-tmu",
-	},
-	{
-		.compatible = "samsung,exynos5420-tmu",
-	},
-	{
-		.compatible = "samsung,exynos5420-tmu-ext-triminfo",
-	},
-	{
-		.compatible = "samsung,exynos5440-tmu",
-	},
-	{
-		.compatible = "samsung,exynos7-tmu",
-	},
-	{},
+	{ .compatible = "samsung,exynos3250-tmu", },
+	{ .compatible = "samsung,exynos4210-tmu", },
+	{ .compatible = "samsung,exynos4412-tmu", },
+	{ .compatible = "samsung,exynos5250-tmu", },
+	{ .compatible = "samsung,exynos5260-tmu", },
+	{ .compatible = "samsung,exynos5420-tmu", },
+	{ .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
+	{ .compatible = "samsung,exynos5440-tmu", },
+	{ .compatible = "samsung,exynos7-tmu", },
+	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, exynos_tmu_match);
 
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 48491d1..174d3bc 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -899,6 +899,22 @@
 		return sprintf(buf, "%d\n", instance->trip);
 }
 
+static struct attribute *cooling_device_attrs[] = {
+	&dev_attr_cdev_type.attr,
+	&dev_attr_max_state.attr,
+	&dev_attr_cur_state.attr,
+	NULL,
+};
+
+static const struct attribute_group cooling_device_attr_group = {
+	.attrs = cooling_device_attrs,
+};
+
+static const struct attribute_group *cooling_device_attr_groups[] = {
+	&cooling_device_attr_group,
+	NULL,
+};
+
 /* Device management */
 
 /**
@@ -1130,6 +1146,7 @@
 	cdev->ops = ops;
 	cdev->updated = false;
 	cdev->device.class = &thermal_class;
+	cdev->device.groups = cooling_device_attr_groups;
 	cdev->devdata = devdata;
 	dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
 	result = device_register(&cdev->device);
@@ -1139,21 +1156,6 @@
 		return ERR_PTR(result);
 	}
 
-	/* sys I/F */
-	if (type) {
-		result = device_create_file(&cdev->device, &dev_attr_cdev_type);
-		if (result)
-			goto unregister;
-	}
-
-	result = device_create_file(&cdev->device, &dev_attr_max_state);
-	if (result)
-		goto unregister;
-
-	result = device_create_file(&cdev->device, &dev_attr_cur_state);
-	if (result)
-		goto unregister;
-
 	/* Add 'this' new cdev to the global cdev list */
 	mutex_lock(&thermal_list_lock);
 	list_add(&cdev->node, &thermal_cdev_list);
@@ -1163,11 +1165,6 @@
 	bind_cdev(cdev);
 
 	return cdev;
-
-unregister:
-	release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
-	device_unregister(&cdev->device);
-	return ERR_PTR(result);
 }
 
 /**
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 634b6ce..62a5d44 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1402,7 +1402,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp)
 {
 	int i;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 3fb054a..a38c175 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -429,7 +429,7 @@
 
 	data = ti_bandgap_get_sensor_data(bgp, id);
 
-	if (data && data->cool_dev)
+	if (data)
 		cpufreq_cooling_unregister(data->cool_dev);
 
 	return 0;
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index d7b198c..ce24182 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -210,18 +210,6 @@
 	return circ_cnt(&bfin_jc_write_buf);
 }
 
-static void
-bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout)
-{
-	unsigned long expire = jiffies + timeout;
-	while (!circ_empty(&bfin_jc_write_buf)) {
-		if (signal_pending(current))
-			break;
-		if (time_after(jiffies, expire))
-			break;
-	}
-}
-
 static const struct tty_operations bfin_jc_ops = {
 	.open            = bfin_jc_open,
 	.close           = bfin_jc_close,
@@ -230,7 +218,6 @@
 	.flush_chars     = bfin_jc_flush_chars,
 	.write_room      = bfin_jc_write_room,
 	.chars_in_buffer = bfin_jc_chars_in_buffer,
-	.wait_until_sent = bfin_jc_wait_until_sent,
 };
 
 static int __init bfin_jc_init(void)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index e3b9570a..deae122 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2138,8 +2138,8 @@
 	/*
 	 * Clear the interrupt registers.
 	 */
-	if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
-		serial_port_in(port, UART_RX);
+	serial_port_in(port, UART_LSR);
+	serial_port_in(port, UART_RX);
 	serial_port_in(port, UART_IIR);
 	serial_port_in(port, UART_MSR);
 
@@ -2300,8 +2300,8 @@
 	 * saved flags to avoid getting false values from polling
 	 * routines or the previous session.
 	 */
-	if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
-		serial_port_in(port, UART_RX);
+	serial_port_in(port, UART_LSR);
+	serial_port_in(port, UART_RX);
 	serial_port_in(port, UART_IIR);
 	serial_port_in(port, UART_MSR);
 	up->lsr_saved_flags = 0;
@@ -2394,8 +2394,7 @@
 	 * Read data port to reset things, and then unlink from
 	 * the IRQ chain.
 	 */
-	if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
-		serial_port_in(port, UART_RX);
+	serial_port_in(port, UART_RX);
 	serial8250_rpm_put(up);
 
 	del_timer_sync(&up->timer);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index e601162..2ab229d 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -59,6 +59,8 @@
 	u8			usr_reg;
 	int			last_mcr;
 	int			line;
+	int			msr_mask_on;
+	int			msr_mask_off;
 	struct clk		*clk;
 	struct clk		*pclk;
 	struct reset_control	*rst;
@@ -81,6 +83,12 @@
 		value &= ~UART_MSR_DCTS;
 	}
 
+	/* Override any modem control signals if needed */
+	if (offset == UART_MSR) {
+		value |= d->msr_mask_on;
+		value &= ~d->msr_mask_off;
+	}
+
 	return value;
 }
 
@@ -334,6 +342,30 @@
 	if (id >= 0)
 		p->line = id;
 
+	if (of_property_read_bool(np, "dcd-override")) {
+		/* Always report DCD as active */
+		data->msr_mask_on |= UART_MSR_DCD;
+		data->msr_mask_off |= UART_MSR_DDCD;
+	}
+
+	if (of_property_read_bool(np, "dsr-override")) {
+		/* Always report DSR as active */
+		data->msr_mask_on |= UART_MSR_DSR;
+		data->msr_mask_off |= UART_MSR_DDSR;
+	}
+
+	if (of_property_read_bool(np, "cts-override")) {
+		/* Always report CTS as active */
+		data->msr_mask_on |= UART_MSR_CTS;
+		data->msr_mask_off |= UART_MSR_DCTS;
+	}
+
+	if (of_property_read_bool(np, "ri-override")) {
+		/* Always report Ring indicator as inactive */
+		data->msr_mask_off |= UART_MSR_RI;
+		data->msr_mask_off |= UART_MSR_TERI;
+	}
+
 	/* clock got configured through clk api, all done */
 	if (p->uartclk)
 		return 0;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index daf2c82..892eb32 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -69,7 +69,7 @@
 	       "Please send the output of lspci -vv, this\n"
 	       "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
 	       "manufacturer and name of serial board or\n"
-	       "modem board to rmk+serial@arm.linux.org.uk.\n",
+	       "modem board to <linux-serial@vger.kernel.org>.\n",
 	       pci_name(dev), str, dev->vendor, dev->device,
 	       dev->subsystem_vendor, dev->subsystem_device);
 }
@@ -1989,13 +1989,6 @@
 	},
 	{
 		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_QRK_UART,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
 		.device		= PCI_DEVICE_ID_INTEL_BSW_UART1,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
@@ -2201,13 +2194,6 @@
 	 */
 	{
 		.vendor		= PCI_VENDOR_ID_PLX,
-		.device		= PCI_DEVICE_ID_PLX_9030,
-		.subvendor	= PCI_SUBVENDOR_ID_PERLE,
-		.subdevice	= PCI_ANY_ID,
-		.setup		= pci_default_setup,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_PLX,
 		.device		= PCI_DEVICE_ID_PLX_9050,
 		.subvendor	= PCI_SUBVENDOR_ID_EXSYS,
 		.subdevice	= PCI_SUBDEVICE_ID_EXSYS_4055,
@@ -5415,10 +5401,6 @@
 		PCI_ANY_ID, PCI_ANY_ID,
 		0, 0, pbn_b0_bt_2_115200 },
 
-	{	PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S,
-		PCI_ANY_ID, PCI_ANY_ID,
-		0, 0, pbn_b0_bt_2_115200 },
-
 	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
 		PCI_ANY_ID, PCI_ANY_ID,
 		0, 0, pbn_wch384_4 },
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 846552b..4e959c4 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -47,6 +47,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/err.h>
 #include <linux/irq.h>
+#include <linux/suspend.h>
 
 #include <asm/io.h>
 #include <asm/ioctls.h>
@@ -173,6 +174,12 @@
 	bool			ms_irq_enabled;
 	bool			is_usart;	/* usart or uart */
 	struct timer_list	uart_timer;	/* uart timer */
+
+	bool			suspended;
+	unsigned int		pending;
+	unsigned int		pending_status;
+	spinlock_t		lock_suspended;
+
 	int (*prepare_rx)(struct uart_port *port);
 	int (*prepare_tx)(struct uart_port *port);
 	void (*schedule_rx)(struct uart_port *port);
@@ -1179,12 +1186,15 @@
 {
 	struct uart_port *port = dev_id;
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
-	unsigned int status, pending, pass_counter = 0;
+	unsigned int status, pending, mask, pass_counter = 0;
 	bool gpio_handled = false;
 
+	spin_lock(&atmel_port->lock_suspended);
+
 	do {
 		status = atmel_get_lines_status(port);
-		pending = status & UART_GET_IMR(port);
+		mask = UART_GET_IMR(port);
+		pending = status & mask;
 		if (!gpio_handled) {
 			/*
 			 * Dealing with GPIO interrupt
@@ -1206,11 +1216,21 @@
 		if (!pending)
 			break;
 
+		if (atmel_port->suspended) {
+			atmel_port->pending |= pending;
+			atmel_port->pending_status = status;
+			UART_PUT_IDR(port, mask);
+			pm_system_wakeup();
+			break;
+		}
+
 		atmel_handle_receive(port, pending);
 		atmel_handle_status(port, pending, status);
 		atmel_handle_transmit(port, pending);
 	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
 
+	spin_unlock(&atmel_port->lock_suspended);
+
 	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -1742,7 +1762,8 @@
 	/*
 	 * Allocate the IRQ
 	 */
-	retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
+	retval = request_irq(port->irq, atmel_interrupt,
+			IRQF_SHARED | IRQF_COND_SUSPEND,
 			tty ? tty->name : "atmel_serial", port);
 	if (retval) {
 		dev_err(port->dev, "atmel_startup - Can't get irq\n");
@@ -2513,8 +2534,14 @@
 
 	/* we can not wake up if we're running on slow clock */
 	atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
-	if (atmel_serial_clk_will_stop())
+	if (atmel_serial_clk_will_stop()) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&atmel_port->lock_suspended, flags);
+		atmel_port->suspended = true;
+		spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
 		device_set_wakeup_enable(&pdev->dev, 0);
+	}
 
 	uart_suspend_port(&atmel_uart, port);
 
@@ -2525,6 +2552,18 @@
 {
 	struct uart_port *port = platform_get_drvdata(pdev);
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&atmel_port->lock_suspended, flags);
+	if (atmel_port->pending) {
+		atmel_handle_receive(port, atmel_port->pending);
+		atmel_handle_status(port, atmel_port->pending,
+				    atmel_port->pending_status);
+		atmel_handle_transmit(port, atmel_port->pending);
+		atmel_port->pending = 0;
+	}
+	atmel_port->suspended = false;
+	spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
 
 	uart_resume_port(&atmel_uart, port);
 	device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
@@ -2593,6 +2632,8 @@
 	port->backup_imr = 0;
 	port->uart.line = ret;
 
+	spin_lock_init(&port->lock_suspended);
+
 	ret = atmel_init_gpios(port, &pdev->dev);
 	if (ret < 0)
 		dev_err(&pdev->dev, "%s",
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 7ff61e2..33fb94f 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -133,10 +133,6 @@
 	if (of_find_property(np, "no-loopback-test", NULL))
 		port->flags |= UPF_SKIP_TEST;
 
-	ret = of_alias_get_id(np, "serial");
-	if (ret >= 0)
-		port->line = ret;
-
 	port->dev = &ofdev->dev;
 
 	switch (type) {
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 594b633..bca975f 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -293,8 +293,10 @@
 
 	ims = serial_in(port, SPRD_IMSR);
 
-	if (!ims)
+	if (!ims) {
+		spin_unlock(&port->lock);
 		return IRQ_NONE;
+	}
 
 	serial_out(port, SPRD_ICLR, ~0);
 
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 51f066a..2bb4dfc 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1028,8 +1028,8 @@
 /* We limit tty time update visibility to every 8 seconds or so. */
 static void tty_update_time(struct timespec *time)
 {
-	unsigned long sec = get_seconds() & ~7;
-	if ((long)(sec - time->tv_sec) > 0)
+	unsigned long sec = get_seconds();
+	if (abs(sec - time->tv_sec) & ~7)
 		time->tv_sec = sec;
 }
 
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index a5cf253..632fc81 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -217,11 +217,17 @@
 #endif
 	if (!timeout)
 		timeout = MAX_SCHEDULE_TIMEOUT;
-	if (wait_event_interruptible_timeout(tty->write_wait,
-			!tty_chars_in_buffer(tty), timeout) >= 0) {
-		if (tty->ops->wait_until_sent)
-			tty->ops->wait_until_sent(tty, timeout);
-	}
+
+	timeout = wait_event_interruptible_timeout(tty->write_wait,
+			!tty_chars_in_buffer(tty), timeout);
+	if (timeout <= 0)
+		return;
+
+	if (timeout == MAX_SCHEDULE_TIMEOUT)
+		timeout = 0;
+
+	if (tty->ops->wait_until_sent)
+		tty->ops->wait_until_sent(tty, timeout);
 }
 EXPORT_SYMBOL(tty_wait_until_sent);
 
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e78720b..6836177 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1650,6 +1650,8 @@
 
 static const struct usb_device_id acm_ids[] = {
 	/* quirky and broken devices */
+	{ USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
+	.driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
 	{ USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
 	.driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
 	{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 66abdbc..1163553 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -501,6 +501,7 @@
 	as->status = urb->status;
 	signr = as->signr;
 	if (signr) {
+		memset(&sinfo, 0, sizeof(sinfo));
 		sinfo.si_signo = as->signr;
 		sinfo.si_errno = as->status;
 		sinfo.si_code = SI_ASYNCIO;
@@ -2382,6 +2383,7 @@
 		wake_up_all(&ps->wait);
 		list_del_init(&ps->list);
 		if (ps->discsignr) {
+			memset(&sinfo, 0, sizeof(sinfo));
 			sinfo.si_signo = ps->discsignr;
 			sinfo.si_errno = EPIPE;
 			sinfo.si_code = SI_ASYNCIO;
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 172d64e..52e0c4e 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -205,6 +205,18 @@
 						omap->irq0_offset, value);
 }
 
+static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
+{
+	dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
+						omap->irqmisc_offset, value);
+}
+
+static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
+{
+	dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
+						omap->irq0_offset, value);
+}
+
 static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
 	enum omap_dwc3_vbus_id_status status)
 {
@@ -345,9 +357,23 @@
 
 static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
 {
+	u32			reg;
+
 	/* disable all IRQs */
-	dwc3_omap_write_irqmisc_set(omap, 0x00);
-	dwc3_omap_write_irq0_set(omap, 0x00);
+	reg = USBOTGSS_IRQO_COREIRQ_ST;
+	dwc3_omap_write_irq0_clr(omap, reg);
+
+	reg = (USBOTGSS_IRQMISC_OEVT |
+			USBOTGSS_IRQMISC_DRVVBUS_RISE |
+			USBOTGSS_IRQMISC_CHRGVBUS_RISE |
+			USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
+			USBOTGSS_IRQMISC_IDPULLUP_RISE |
+			USBOTGSS_IRQMISC_DRVVBUS_FALL |
+			USBOTGSS_IRQMISC_CHRGVBUS_FALL |
+			USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
+			USBOTGSS_IRQMISC_IDPULLUP_FALL);
+
+	dwc3_omap_write_irqmisc_clr(omap, reg);
 }
 
 static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 7564814..c42765b 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1161,7 +1161,6 @@
 	if (desc->opts_mutex)
 		mutex_lock(desc->opts_mutex);
 	memcpy(desc->ext_compat_id, page, l);
-	desc->ext_compat_id[l] = '\0';
 
 	if (desc->opts_mutex)
 		mutex_unlock(desc->opts_mutex);
@@ -1192,7 +1191,6 @@
 	if (desc->opts_mutex)
 		mutex_lock(desc->opts_mutex);
 	memcpy(desc->ext_compat_id + 8, page, l);
-	desc->ext_compat_id[l + 8] = '\0';
 
 	if (desc->opts_mutex)
 		mutex_unlock(desc->opts_mutex);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index af98b09..175c995 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -144,10 +144,9 @@
 	bool read;
 
 	struct kiocb *kiocb;
-	const struct iovec *iovec;
-	unsigned long nr_segs;
-	char __user *buf;
-	size_t len;
+	struct iov_iter data;
+	const void *to_free;
+	char *buf;
 
 	struct mm_struct *mm;
 	struct work_struct work;
@@ -649,29 +648,10 @@
 					 io_data->req->actual;
 
 	if (io_data->read && ret > 0) {
-		int i;
-		size_t pos = 0;
-
-		/*
-		 * Since req->length may be bigger than io_data->len (after
-		 * being rounded up to maxpacketsize), we may end up with more
-		 * data then user space has space for.
-		 */
-		ret = min_t(int, ret, io_data->len);
-
 		use_mm(io_data->mm);
-		for (i = 0; i < io_data->nr_segs; i++) {
-			size_t len = min_t(size_t, ret - pos,
-					io_data->iovec[i].iov_len);
-			if (!len)
-				break;
-			if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
-						 &io_data->buf[pos], len))) {
-				ret = -EFAULT;
-				break;
-			}
-			pos += len;
-		}
+		ret = copy_to_iter(io_data->buf, ret, &io_data->data);
+		if (iov_iter_count(&io_data->data))
+			ret = -EFAULT;
 		unuse_mm(io_data->mm);
 	}
 
@@ -684,7 +664,7 @@
 
 	io_data->kiocb->private = NULL;
 	if (io_data->read)
-		kfree(io_data->iovec);
+		kfree(io_data->to_free);
 	kfree(io_data->buf);
 	kfree(io_data);
 }
@@ -743,6 +723,7 @@
 		 * before the waiting completes, so do not assign to 'gadget' earlier
 		 */
 		struct usb_gadget *gadget = epfile->ffs->gadget;
+		size_t copied;
 
 		spin_lock_irq(&epfile->ffs->eps_lock);
 		/* In the meantime, endpoint got disabled or changed. */
@@ -750,34 +731,21 @@
 			spin_unlock_irq(&epfile->ffs->eps_lock);
 			return -ESHUTDOWN;
 		}
+		data_len = iov_iter_count(&io_data->data);
 		/*
 		 * Controller may require buffer size to be aligned to
 		 * maxpacketsize of an out endpoint.
 		 */
-		data_len = io_data->read ?
-			   usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
-			   io_data->len;
+		if (io_data->read)
+			data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
 		spin_unlock_irq(&epfile->ffs->eps_lock);
 
 		data = kmalloc(data_len, GFP_KERNEL);
 		if (unlikely(!data))
 			return -ENOMEM;
-		if (io_data->aio && !io_data->read) {
-			int i;
-			size_t pos = 0;
-			for (i = 0; i < io_data->nr_segs; i++) {
-				if (unlikely(copy_from_user(&data[pos],
-					     io_data->iovec[i].iov_base,
-					     io_data->iovec[i].iov_len))) {
-					ret = -EFAULT;
-					goto error;
-				}
-				pos += io_data->iovec[i].iov_len;
-			}
-		} else {
-			if (!io_data->read &&
-			    unlikely(__copy_from_user(data, io_data->buf,
-						      io_data->len))) {
+		if (!io_data->read) {
+			copied = copy_from_iter(data, data_len, &io_data->data);
+			if (copied != data_len) {
 				ret = -EFAULT;
 				goto error;
 			}
@@ -876,10 +844,8 @@
 				 */
 				ret = ep->status;
 				if (io_data->read && ret > 0) {
-					ret = min_t(size_t, ret, io_data->len);
-
-					if (unlikely(copy_to_user(io_data->buf,
-						data, ret)))
+					ret = copy_to_iter(data, ret, &io_data->data);
+					if (unlikely(iov_iter_count(&io_data->data)))
 						ret = -EFAULT;
 				}
 			}
@@ -898,37 +864,6 @@
 	return ret;
 }
 
-static ssize_t
-ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
-		 loff_t *ptr)
-{
-	struct ffs_io_data io_data;
-
-	ENTER();
-
-	io_data.aio = false;
-	io_data.read = false;
-	io_data.buf = (char * __user)buf;
-	io_data.len = len;
-
-	return ffs_epfile_io(file, &io_data);
-}
-
-static ssize_t
-ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
-{
-	struct ffs_io_data io_data;
-
-	ENTER();
-
-	io_data.aio = false;
-	io_data.read = true;
-	io_data.buf = buf;
-	io_data.len = len;
-
-	return ffs_epfile_io(file, &io_data);
-}
-
 static int
 ffs_epfile_open(struct inode *inode, struct file *file)
 {
@@ -965,67 +900,86 @@
 	return value;
 }
 
-static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
-				    const struct iovec *iovec,
-				    unsigned long nr_segs, loff_t loff)
+static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
 {
-	struct ffs_io_data *io_data;
+	struct ffs_io_data io_data, *p = &io_data;
+	ssize_t res;
 
 	ENTER();
 
-	io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
-	if (unlikely(!io_data))
-		return -ENOMEM;
-
-	io_data->aio = true;
-	io_data->read = false;
-	io_data->kiocb = kiocb;
-	io_data->iovec = iovec;
-	io_data->nr_segs = nr_segs;
-	io_data->len = kiocb->ki_nbytes;
-	io_data->mm = current->mm;
-
-	kiocb->private = io_data;
-
-	kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
-
-	return ffs_epfile_io(kiocb->ki_filp, io_data);
-}
-
-static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
-				   const struct iovec *iovec,
-				   unsigned long nr_segs, loff_t loff)
-{
-	struct ffs_io_data *io_data;
-	struct iovec *iovec_copy;
-
-	ENTER();
-
-	iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
-	if (unlikely(!iovec_copy))
-		return -ENOMEM;
-
-	memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
-
-	io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
-	if (unlikely(!io_data)) {
-		kfree(iovec_copy);
-		return -ENOMEM;
+	if (!is_sync_kiocb(kiocb)) {
+		p = kmalloc(sizeof(io_data), GFP_KERNEL);
+		if (unlikely(!p))
+			return -ENOMEM;
+		p->aio = true;
+	} else {
+		p->aio = false;
 	}
 
-	io_data->aio = true;
-	io_data->read = true;
-	io_data->kiocb = kiocb;
-	io_data->iovec = iovec_copy;
-	io_data->nr_segs = nr_segs;
-	io_data->len = kiocb->ki_nbytes;
-	io_data->mm = current->mm;
+	p->read = false;
+	p->kiocb = kiocb;
+	p->data = *from;
+	p->mm = current->mm;
 
-	kiocb->private = io_data;
+	kiocb->private = p;
 
 	kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
 
-	return ffs_epfile_io(kiocb->ki_filp, io_data);
+	res = ffs_epfile_io(kiocb->ki_filp, p);
+	if (res == -EIOCBQUEUED)
+		return res;
+	if (p->aio)
+		kfree(p);
+	else
+		*from = p->data;
+	return res;
+}
+
+static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
+{
+	struct ffs_io_data io_data, *p = &io_data;
+	ssize_t res;
+
+	ENTER();
+
+	if (!is_sync_kiocb(kiocb)) {
+		p = kmalloc(sizeof(io_data), GFP_KERNEL);
+		if (unlikely(!p))
+			return -ENOMEM;
+		p->aio = true;
+	} else {
+		p->aio = false;
+	}
+
+	p->read = true;
+	p->kiocb = kiocb;
+	if (p->aio) {
+		p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
+		if (!p->to_free) {
+			kfree(p);
+			return -ENOMEM;
+		}
+	} else {
+		p->data = *to;
+		p->to_free = NULL;
+	}
+	p->mm = current->mm;
+
+	kiocb->private = p;
+
+	kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+
+	res = ffs_epfile_io(kiocb->ki_filp, p);
+	if (res == -EIOCBQUEUED)
+		return res;
+
+	if (p->aio) {
+		kfree(p->to_free);
+		kfree(p);
+	} else {
+		*to = p->data;
+	}
+	return res;
 }
 
 static int
@@ -1105,10 +1059,10 @@
 	.llseek =	no_llseek,
 
 	.open =		ffs_epfile_open,
-	.write =	ffs_epfile_write,
-	.read =		ffs_epfile_read,
-	.aio_write =	ffs_epfile_aio_write,
-	.aio_read =	ffs_epfile_aio_read,
+	.write =	new_sync_write,
+	.read =		new_sync_read,
+	.write_iter =	ffs_epfile_write_iter,
+	.read_iter =	ffs_epfile_read_iter,
 	.release =	ffs_epfile_release,
 	.unlocked_ioctl =	ffs_epfile_ioctl,
 };
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 426d69a..a2612fb 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -569,7 +569,7 @@
 	return status;
 }
 
-const struct file_operations f_hidg_fops = {
+static const struct file_operations f_hidg_fops = {
 	.owner		= THIS_MODULE,
 	.open		= f_hidg_open,
 	.release	= f_hidg_release,
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index c89e96c..c0c3ef2 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -417,7 +417,10 @@
 			return -EINVAL;
 
 		spin_lock(&port->lock);
-		__pn_reset(f);
+
+		if (fp->in_ep->driver_data)
+			__pn_reset(f);
+
 		if (alt == 1) {
 			int i;
 
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index e07c50c..e3dae47 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -344,7 +344,7 @@
 	.bInterval =		USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
 };
 
-struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = {
+static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = {
 	.bLength =		USB_DT_SS_EP_COMP_SIZE,
 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
 
@@ -362,7 +362,7 @@
 	.bInterval =		USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
 };
 
-struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = {
+static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = {
 	.bLength =		USB_DT_SS_EP_COMP_SIZE,
 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
 
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 33e1665..6d3eb8b 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -54,7 +54,7 @@
 #define UNFLW_CTRL	8
 #define OVFLW_CTRL	10
 
-const char *uac2_name = "snd_uac2";
+static const char *uac2_name = "snd_uac2";
 
 struct uac2_req {
 	struct uac2_rtd_params *pp; /* parent param */
@@ -634,7 +634,7 @@
 };
 
 /* Clock source for IN traffic */
-struct uac_clock_source_descriptor in_clk_src_desc = {
+static struct uac_clock_source_descriptor in_clk_src_desc = {
 	.bLength = sizeof in_clk_src_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -646,7 +646,7 @@
 };
 
 /* Clock source for OUT traffic */
-struct uac_clock_source_descriptor out_clk_src_desc = {
+static struct uac_clock_source_descriptor out_clk_src_desc = {
 	.bLength = sizeof out_clk_src_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -658,7 +658,7 @@
 };
 
 /* Input Terminal for USB_OUT */
-struct uac2_input_terminal_descriptor usb_out_it_desc = {
+static struct uac2_input_terminal_descriptor usb_out_it_desc = {
 	.bLength = sizeof usb_out_it_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -672,7 +672,7 @@
 };
 
 /* Input Terminal for I/O-In */
-struct uac2_input_terminal_descriptor io_in_it_desc = {
+static struct uac2_input_terminal_descriptor io_in_it_desc = {
 	.bLength = sizeof io_in_it_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -686,7 +686,7 @@
 };
 
 /* Output Terminal for USB_IN */
-struct uac2_output_terminal_descriptor usb_in_ot_desc = {
+static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
 	.bLength = sizeof usb_in_ot_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -700,7 +700,7 @@
 };
 
 /* Output Terminal for I/O-Out */
-struct uac2_output_terminal_descriptor io_out_ot_desc = {
+static struct uac2_output_terminal_descriptor io_out_ot_desc = {
 	.bLength = sizeof io_out_ot_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -713,7 +713,7 @@
 	.bmControls = (CONTROL_RDWR << COPY_CTRL),
 };
 
-struct uac2_ac_header_descriptor ac_hdr_desc = {
+static struct uac2_ac_header_descriptor ac_hdr_desc = {
 	.bLength = sizeof ac_hdr_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -751,7 +751,7 @@
 };
 
 /* Audio Stream OUT Intface Desc */
-struct uac2_as_header_descriptor as_out_hdr_desc = {
+static struct uac2_as_header_descriptor as_out_hdr_desc = {
 	.bLength = sizeof as_out_hdr_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -764,7 +764,7 @@
 };
 
 /* Audio USB_OUT Format */
-struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
+static struct uac2_format_type_i_descriptor as_out_fmt1_desc = {
 	.bLength = sizeof as_out_fmt1_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -772,7 +772,7 @@
 };
 
 /* STD AS ISO OUT Endpoint */
-struct usb_endpoint_descriptor fs_epout_desc = {
+static struct usb_endpoint_descriptor fs_epout_desc = {
 	.bLength = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType = USB_DT_ENDPOINT,
 
@@ -782,7 +782,7 @@
 	.bInterval = 1,
 };
 
-struct usb_endpoint_descriptor hs_epout_desc = {
+static struct usb_endpoint_descriptor hs_epout_desc = {
 	.bLength = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType = USB_DT_ENDPOINT,
 
@@ -828,7 +828,7 @@
 };
 
 /* Audio Stream IN Intface Desc */
-struct uac2_as_header_descriptor as_in_hdr_desc = {
+static struct uac2_as_header_descriptor as_in_hdr_desc = {
 	.bLength = sizeof as_in_hdr_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 
@@ -841,7 +841,7 @@
 };
 
 /* Audio USB_IN Format */
-struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
+static struct uac2_format_type_i_descriptor as_in_fmt1_desc = {
 	.bLength = sizeof as_in_fmt1_desc,
 	.bDescriptorType = USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype = UAC_FORMAT_TYPE,
@@ -849,7 +849,7 @@
 };
 
 /* STD AS ISO IN Endpoint */
-struct usb_endpoint_descriptor fs_epin_desc = {
+static struct usb_endpoint_descriptor fs_epin_desc = {
 	.bLength = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType = USB_DT_ENDPOINT,
 
@@ -859,7 +859,7 @@
 	.bInterval = 1,
 };
 
-struct usb_endpoint_descriptor hs_epin_desc = {
+static struct usb_endpoint_descriptor hs_epin_desc = {
 	.bLength = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType = USB_DT_ENDPOINT,
 
@@ -1563,7 +1563,7 @@
 		agdev->out_ep->driver_data = NULL;
 }
 
-struct usb_function *afunc_alloc(struct usb_function_instance *fi)
+static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
 {
 	struct audio_dev *agdev;
 	struct f_uac2_opts *opts;
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 5aad7fe..8b818fd 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -27,6 +27,7 @@
 #include "uvc.h"
 #include "uvc_queue.h"
 #include "uvc_video.h"
+#include "uvc_v4l2.h"
 
 /* --------------------------------------------------------------------------
  * Requests handling
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 9cb86bc..50a5e63 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -21,6 +21,7 @@
 
 #include "uvc.h"
 #include "uvc_queue.h"
+#include "uvc_video.h"
 
 /* --------------------------------------------------------------------------
  * Video codecs
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 06acfa5..b01b88e 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -133,7 +133,9 @@
 	struct usb_configuration c;
 	int (*eth)(struct usb_configuration *c);
 	int num;
-} gfs_configurations[] = {
+};
+
+static struct gfs_configuration gfs_configurations[] = {
 #ifdef CONFIG_USB_FUNCTIONFS_RNDIS
 	{
 		.eth		= bind_rndis_config,
@@ -278,7 +280,7 @@
 	if (!try_module_get(THIS_MODULE))
 		return ERR_PTR(-ENOENT);
 	
-	return 0;
+	return NULL;
 }
 
 static void functionfs_release_dev(struct ffs_dev *dev)
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index db49ec4..200f9a5 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -74,6 +74,8 @@
 MODULE_AUTHOR ("David Brownell");
 MODULE_LICENSE ("GPL");
 
+static int ep_open(struct inode *, struct file *);
+
 
 /*----------------------------------------------------------------------*/
 
@@ -283,14 +285,15 @@
  * still need dev->lock to use epdata->ep.
  */
 static int
-get_ready_ep (unsigned f_flags, struct ep_data *epdata)
+get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
 {
 	int	val;
 
 	if (f_flags & O_NONBLOCK) {
 		if (!mutex_trylock(&epdata->lock))
 			goto nonblock;
-		if (epdata->state != STATE_EP_ENABLED) {
+		if (epdata->state != STATE_EP_ENABLED &&
+		    (!is_write || epdata->state != STATE_EP_READY)) {
 			mutex_unlock(&epdata->lock);
 nonblock:
 			val = -EAGAIN;
@@ -305,18 +308,20 @@
 
 	switch (epdata->state) {
 	case STATE_EP_ENABLED:
+		return 0;
+	case STATE_EP_READY:			/* not configured yet */
+		if (is_write)
+			return 0;
+		// FALLTHRU
+	case STATE_EP_UNBOUND:			/* clean disconnect */
 		break;
 	// case STATE_EP_DISABLED:		/* "can't happen" */
-	// case STATE_EP_READY:			/* "can't happen" */
 	default:				/* error! */
 		pr_debug ("%s: ep %p not available, state %d\n",
 				shortname, epdata, epdata->state);
-		// FALLTHROUGH
-	case STATE_EP_UNBOUND:			/* clean disconnect */
-		val = -ENODEV;
-		mutex_unlock(&epdata->lock);
 	}
-	return val;
+	mutex_unlock(&epdata->lock);
+	return -ENODEV;
 }
 
 static ssize_t
@@ -363,97 +368,6 @@
 	return value;
 }
 
-
-/* handle a synchronous OUT bulk/intr/iso transfer */
-static ssize_t
-ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
-{
-	struct ep_data		*data = fd->private_data;
-	void			*kbuf;
-	ssize_t			value;
-
-	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
-		return value;
-
-	/* halt any endpoint by doing a "wrong direction" i/o call */
-	if (usb_endpoint_dir_in(&data->desc)) {
-		if (usb_endpoint_xfer_isoc(&data->desc)) {
-			mutex_unlock(&data->lock);
-			return -EINVAL;
-		}
-		DBG (data->dev, "%s halt\n", data->name);
-		spin_lock_irq (&data->dev->lock);
-		if (likely (data->ep != NULL))
-			usb_ep_set_halt (data->ep);
-		spin_unlock_irq (&data->dev->lock);
-		mutex_unlock(&data->lock);
-		return -EBADMSG;
-	}
-
-	/* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
-
-	value = -ENOMEM;
-	kbuf = kmalloc (len, GFP_KERNEL);
-	if (unlikely (!kbuf))
-		goto free1;
-
-	value = ep_io (data, kbuf, len);
-	VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
-		data->name, len, (int) value);
-	if (value >= 0 && copy_to_user (buf, kbuf, value))
-		value = -EFAULT;
-
-free1:
-	mutex_unlock(&data->lock);
-	kfree (kbuf);
-	return value;
-}
-
-/* handle a synchronous IN bulk/intr/iso transfer */
-static ssize_t
-ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
-{
-	struct ep_data		*data = fd->private_data;
-	void			*kbuf;
-	ssize_t			value;
-
-	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
-		return value;
-
-	/* halt any endpoint by doing a "wrong direction" i/o call */
-	if (!usb_endpoint_dir_in(&data->desc)) {
-		if (usb_endpoint_xfer_isoc(&data->desc)) {
-			mutex_unlock(&data->lock);
-			return -EINVAL;
-		}
-		DBG (data->dev, "%s halt\n", data->name);
-		spin_lock_irq (&data->dev->lock);
-		if (likely (data->ep != NULL))
-			usb_ep_set_halt (data->ep);
-		spin_unlock_irq (&data->dev->lock);
-		mutex_unlock(&data->lock);
-		return -EBADMSG;
-	}
-
-	/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
-
-	value = -ENOMEM;
-	kbuf = memdup_user(buf, len);
-	if (IS_ERR(kbuf)) {
-		value = PTR_ERR(kbuf);
-		kbuf = NULL;
-		goto free1;
-	}
-
-	value = ep_io (data, kbuf, len);
-	VDEBUG (data->dev, "%s write %zu IN, status %d\n",
-		data->name, len, (int) value);
-free1:
-	mutex_unlock(&data->lock);
-	kfree (kbuf);
-	return value;
-}
-
 static int
 ep_release (struct inode *inode, struct file *fd)
 {
@@ -481,7 +395,7 @@
 	struct ep_data		*data = fd->private_data;
 	int			status;
 
-	if ((status = get_ready_ep (fd->f_flags, data)) < 0)
+	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
 		return status;
 
 	spin_lock_irq (&data->dev->lock);
@@ -517,8 +431,8 @@
 	struct mm_struct	*mm;
 	struct work_struct	work;
 	void			*buf;
-	const struct iovec	*iv;
-	unsigned long		nr_segs;
+	struct iov_iter		to;
+	const void		*to_free;
 	unsigned		actual;
 };
 
@@ -541,35 +455,6 @@
 	return value;
 }
 
-static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
-{
-	ssize_t			len, total;
-	void			*to_copy;
-	int			i;
-
-	/* copy stuff into user buffers */
-	total = priv->actual;
-	len = 0;
-	to_copy = priv->buf;
-	for (i=0; i < priv->nr_segs; i++) {
-		ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
-
-		if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
-			if (len == 0)
-				len = -EFAULT;
-			break;
-		}
-
-		total -= this;
-		len += this;
-		to_copy += this;
-		if (total == 0)
-			break;
-	}
-
-	return len;
-}
-
 static void ep_user_copy_worker(struct work_struct *work)
 {
 	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
@@ -578,13 +463,16 @@
 	size_t ret;
 
 	use_mm(mm);
-	ret = ep_copy_to_user(priv);
+	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
 	unuse_mm(mm);
+	if (!ret)
+		ret = -EFAULT;
 
 	/* completing the iocb can drop the ctx and mm, don't touch mm after */
 	aio_complete(iocb, ret, ret);
 
 	kfree(priv->buf);
+	kfree(priv->to_free);
 	kfree(priv);
 }
 
@@ -603,8 +491,9 @@
 	 * don't need to copy anything to userspace, so we can
 	 * complete the aio request immediately.
 	 */
-	if (priv->iv == NULL || unlikely(req->actual == 0)) {
+	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
 		kfree(req->buf);
+		kfree(priv->to_free);
 		kfree(priv);
 		iocb->private = NULL;
 		/* aio_complete() reports bytes-transferred _and_ faults */
@@ -618,6 +507,7 @@
 
 		priv->buf = req->buf;
 		priv->actual = req->actual;
+		INIT_WORK(&priv->work, ep_user_copy_worker);
 		schedule_work(&priv->work);
 	}
 	spin_unlock(&epdata->dev->lock);
@@ -626,38 +516,17 @@
 	put_ep(epdata);
 }
 
-static ssize_t
-ep_aio_rwtail(
-	struct kiocb	*iocb,
-	char		*buf,
-	size_t		len,
-	struct ep_data	*epdata,
-	const struct iovec *iv,
-	unsigned long	nr_segs
-)
+static ssize_t ep_aio(struct kiocb *iocb,
+		      struct kiocb_priv *priv,
+		      struct ep_data *epdata,
+		      char *buf,
+		      size_t len)
 {
-	struct kiocb_priv	*priv;
-	struct usb_request	*req;
-	ssize_t			value;
+	struct usb_request *req;
+	ssize_t value;
 
-	priv = kmalloc(sizeof *priv, GFP_KERNEL);
-	if (!priv) {
-		value = -ENOMEM;
-fail:
-		kfree(buf);
-		return value;
-	}
 	iocb->private = priv;
 	priv->iocb = iocb;
-	priv->iv = iv;
-	priv->nr_segs = nr_segs;
-	INIT_WORK(&priv->work, ep_user_copy_worker);
-
-	value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
-	if (unlikely(value < 0)) {
-		kfree(priv);
-		goto fail;
-	}
 
 	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
 	get_ep(epdata);
@@ -669,75 +538,154 @@
 	 * allocate or submit those if the host disconnected.
 	 */
 	spin_lock_irq(&epdata->dev->lock);
-	if (likely(epdata->ep)) {
-		req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
-		if (likely(req)) {
-			priv->req = req;
-			req->buf = buf;
-			req->length = len;
-			req->complete = ep_aio_complete;
-			req->context = iocb;
-			value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
-			if (unlikely(0 != value))
-				usb_ep_free_request(epdata->ep, req);
-		} else
-			value = -EAGAIN;
-	} else
-		value = -ENODEV;
+	value = -ENODEV;
+	if (unlikely(!epdata->ep))
+		goto fail;
+
+	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
+	value = -ENOMEM;
+	if (unlikely(!req))
+		goto fail;
+
+	priv->req = req;
+	req->buf = buf;
+	req->length = len;
+	req->complete = ep_aio_complete;
+	req->context = iocb;
+	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
+	if (unlikely(0 != value)) {
+		usb_ep_free_request(epdata->ep, req);
+		goto fail;
+	}
 	spin_unlock_irq(&epdata->dev->lock);
+	return -EIOCBQUEUED;
 
-	mutex_unlock(&epdata->lock);
-
-	if (unlikely(value)) {
-		kfree(priv);
-		put_ep(epdata);
-	} else
-		value = -EIOCBQUEUED;
+fail:
+	spin_unlock_irq(&epdata->dev->lock);
+	kfree(priv->to_free);
+	kfree(priv);
+	put_ep(epdata);
 	return value;
 }
 
 static ssize_t
-ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
-		unsigned long nr_segs, loff_t o)
+ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-	struct ep_data		*epdata = iocb->ki_filp->private_data;
-	char			*buf;
+	struct file *file = iocb->ki_filp;
+	struct ep_data *epdata = file->private_data;
+	size_t len = iov_iter_count(to);
+	ssize_t value;
+	char *buf;
 
-	if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
-		return -EINVAL;
+	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
+		return value;
 
-	buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
-	if (unlikely(!buf))
+	/* halt any endpoint by doing a "wrong direction" i/o call */
+	if (usb_endpoint_dir_in(&epdata->desc)) {
+		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
+		    !is_sync_kiocb(iocb)) {
+			mutex_unlock(&epdata->lock);
+			return -EINVAL;
+		}
+		DBG (epdata->dev, "%s halt\n", epdata->name);
+		spin_lock_irq(&epdata->dev->lock);
+		if (likely(epdata->ep != NULL))
+			usb_ep_set_halt(epdata->ep);
+		spin_unlock_irq(&epdata->dev->lock);
+		mutex_unlock(&epdata->lock);
+		return -EBADMSG;
+	}
+
+	buf = kmalloc(len, GFP_KERNEL);
+	if (unlikely(!buf)) {
+		mutex_unlock(&epdata->lock);
 		return -ENOMEM;
-
-	return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
+	}
+	if (is_sync_kiocb(iocb)) {
+		value = ep_io(epdata, buf, len);
+		if (value >= 0 && copy_to_iter(buf, value, to) != value)
+			value = -EFAULT;
+	} else {
+		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
+		value = -ENOMEM;
+		if (!priv)
+			goto fail;
+		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
+		if (!priv->to_free) {
+			kfree(priv);
+			goto fail;
+		}
+		value = ep_aio(iocb, priv, epdata, buf, len);
+		if (value == -EIOCBQUEUED)
+			buf = NULL;
+	}
+fail:
+	kfree(buf);
+	mutex_unlock(&epdata->lock);
+	return value;
 }
 
+static ssize_t ep_config(struct ep_data *, const char *, size_t);
+
 static ssize_t
-ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
-		unsigned long nr_segs, loff_t o)
+ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
-	struct ep_data		*epdata = iocb->ki_filp->private_data;
-	char			*buf;
-	size_t			len = 0;
-	int			i = 0;
+	struct file *file = iocb->ki_filp;
+	struct ep_data *epdata = file->private_data;
+	size_t len = iov_iter_count(from);
+	bool configured;
+	ssize_t value;
+	char *buf;
 
-	if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
-		return -EINVAL;
+	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
+		return value;
 
-	buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
-	if (unlikely(!buf))
-		return -ENOMEM;
+	configured = epdata->state == STATE_EP_ENABLED;
 
-	for (i=0; i < nr_segs; i++) {
-		if (unlikely(copy_from_user(&buf[len], iov[i].iov_base,
-				iov[i].iov_len) != 0)) {
-			kfree(buf);
-			return -EFAULT;
+	/* halt any endpoint by doing a "wrong direction" i/o call */
+	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
+		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
+		    !is_sync_kiocb(iocb)) {
+			mutex_unlock(&epdata->lock);
+			return -EINVAL;
 		}
-		len += iov[i].iov_len;
+		DBG (epdata->dev, "%s halt\n", epdata->name);
+		spin_lock_irq(&epdata->dev->lock);
+		if (likely(epdata->ep != NULL))
+			usb_ep_set_halt(epdata->ep);
+		spin_unlock_irq(&epdata->dev->lock);
+		mutex_unlock(&epdata->lock);
+		return -EBADMSG;
 	}
-	return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0);
+
+	buf = kmalloc(len, GFP_KERNEL);
+	if (unlikely(!buf)) {
+		mutex_unlock(&epdata->lock);
+		return -ENOMEM;
+	}
+
+	if (unlikely(copy_from_iter(buf, len, from) != len)) {
+		value = -EFAULT;
+		goto out;
+	}
+
+	if (unlikely(!configured)) {
+		value = ep_config(epdata, buf, len);
+	} else if (is_sync_kiocb(iocb)) {
+		value = ep_io(epdata, buf, len);
+	} else {
+		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
+		value = -ENOMEM;
+		if (priv) {
+			value = ep_aio(iocb, priv, epdata, buf, len);
+			if (value == -EIOCBQUEUED)
+				buf = NULL;
+		}
+	}
+out:
+	kfree(buf);
+	mutex_unlock(&epdata->lock);
+	return value;
 }
 
 /*----------------------------------------------------------------------*/
@@ -745,15 +693,15 @@
 /* used after endpoint configuration */
 static const struct file_operations ep_io_operations = {
 	.owner =	THIS_MODULE,
-	.llseek =	no_llseek,
 
-	.read =		ep_read,
-	.write =	ep_write,
-	.unlocked_ioctl = ep_ioctl,
+	.open =		ep_open,
 	.release =	ep_release,
-
-	.aio_read =	ep_aio_read,
-	.aio_write =	ep_aio_write,
+	.llseek =	no_llseek,
+	.read =		new_sync_read,
+	.write =	new_sync_write,
+	.unlocked_ioctl = ep_ioctl,
+	.read_iter =	ep_read_iter,
+	.write_iter =	ep_write_iter,
 };
 
 /* ENDPOINT INITIALIZATION
@@ -770,17 +718,12 @@
  * speed descriptor, then optional high speed descriptor.
  */
 static ssize_t
-ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ep_config (struct ep_data *data, const char *buf, size_t len)
 {
-	struct ep_data		*data = fd->private_data;
 	struct usb_ep		*ep;
 	u32			tag;
 	int			value, length = len;
 
-	value = mutex_lock_interruptible(&data->lock);
-	if (value < 0)
-		return value;
-
 	if (data->state != STATE_EP_READY) {
 		value = -EL2HLT;
 		goto fail;
@@ -791,9 +734,7 @@
 		goto fail0;
 
 	/* we might need to change message format someday */
-	if (copy_from_user (&tag, buf, 4)) {
-		goto fail1;
-	}
+	memcpy(&tag, buf, 4);
 	if (tag != 1) {
 		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
 		goto fail0;
@@ -806,19 +747,15 @@
 	 */
 
 	/* full/low speed descriptor, then high speed */
-	if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
-		goto fail1;
-	}
+	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
 	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
 			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
 		goto fail0;
 	if (len != USB_DT_ENDPOINT_SIZE) {
 		if (len != 2 * USB_DT_ENDPOINT_SIZE)
 			goto fail0;
-		if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
-					USB_DT_ENDPOINT_SIZE)) {
-			goto fail1;
-		}
+		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
+			USB_DT_ENDPOINT_SIZE);
 		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
 				|| data->hs_desc.bDescriptorType
 					!= USB_DT_ENDPOINT) {
@@ -840,24 +777,20 @@
 	case USB_SPEED_LOW:
 	case USB_SPEED_FULL:
 		ep->desc = &data->desc;
-		value = usb_ep_enable(ep);
-		if (value == 0)
-			data->state = STATE_EP_ENABLED;
 		break;
 	case USB_SPEED_HIGH:
 		/* fails if caller didn't provide that descriptor... */
 		ep->desc = &data->hs_desc;
-		value = usb_ep_enable(ep);
-		if (value == 0)
-			data->state = STATE_EP_ENABLED;
 		break;
 	default:
 		DBG(data->dev, "unconnected, %s init abandoned\n",
 				data->name);
 		value = -EINVAL;
+		goto gone;
 	}
+	value = usb_ep_enable(ep);
 	if (value == 0) {
-		fd->f_op = &ep_io_operations;
+		data->state = STATE_EP_ENABLED;
 		value = length;
 	}
 gone:
@@ -867,14 +800,10 @@
 		data->desc.bDescriptorType = 0;
 		data->hs_desc.bDescriptorType = 0;
 	}
-	mutex_unlock(&data->lock);
 	return value;
 fail0:
 	value = -EINVAL;
 	goto fail;
-fail1:
-	value = -EFAULT;
-	goto fail;
 }
 
 static int
@@ -902,15 +831,6 @@
 	return value;
 }
 
-/* used before endpoint configuration */
-static const struct file_operations ep_config_operations = {
-	.llseek =	no_llseek,
-
-	.open =		ep_open,
-	.write =	ep_config,
-	.release =	ep_release,
-};
-
 /*----------------------------------------------------------------------*/
 
 /* EP0 IMPLEMENTATION can be partly in userspace.
@@ -989,6 +909,10 @@
 	enum ep0_state			state;
 
 	spin_lock_irq (&dev->lock);
+	if (dev->state <= STATE_DEV_OPENED) {
+		retval = -EINVAL;
+		goto done;
+	}
 
 	/* report fd mode change before acting on it */
 	if (dev->setup_abort) {
@@ -1187,8 +1111,6 @@
 	struct dev_data		*dev = fd->private_data;
 	ssize_t			retval = -ESRCH;
 
-	spin_lock_irq (&dev->lock);
-
 	/* report fd mode change before acting on it */
 	if (dev->setup_abort) {
 		dev->setup_abort = 0;
@@ -1234,7 +1156,6 @@
 	} else
 		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
 
-	spin_unlock_irq (&dev->lock);
 	return retval;
 }
 
@@ -1281,6 +1202,9 @@
        struct dev_data         *dev = fd->private_data;
        int                     mask = 0;
 
+	if (dev->state <= STATE_DEV_OPENED)
+		return DEFAULT_POLLMASK;
+
        poll_wait(fd, &dev->wait, wait);
 
        spin_lock_irq (&dev->lock);
@@ -1316,19 +1240,6 @@
 	return ret;
 }
 
-/* used after device configuration */
-static const struct file_operations ep0_io_operations = {
-	.owner =	THIS_MODULE,
-	.llseek =	no_llseek,
-
-	.read =		ep0_read,
-	.write =	ep0_write,
-	.fasync =	ep0_fasync,
-	.poll =		ep0_poll,
-	.unlocked_ioctl =	dev_ioctl,
-	.release =	dev_release,
-};
-
 /*----------------------------------------------------------------------*/
 
 /* The in-kernel gadget driver handles most ep0 issues, in particular
@@ -1650,7 +1561,7 @@
 			goto enomem1;
 
 		data->dentry = gadgetfs_create_file (dev->sb, data->name,
-				data, &ep_config_operations);
+				data, &ep_io_operations);
 		if (!data->dentry)
 			goto enomem2;
 		list_add_tail (&data->epfiles, &dev->epfiles);
@@ -1852,6 +1763,14 @@
 	u32			tag;
 	char			*kbuf;
 
+	spin_lock_irq(&dev->lock);
+	if (dev->state > STATE_DEV_OPENED) {
+		value = ep0_write(fd, buf, len, ptr);
+		spin_unlock_irq(&dev->lock);
+		return value;
+	}
+	spin_unlock_irq(&dev->lock);
+
 	if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
 		return -EINVAL;
 
@@ -1925,7 +1844,6 @@
 		 * on, they can work ... except in cleanup paths that
 		 * kick in after the ep0 descriptor is closed.
 		 */
-		fd->f_op = &ep0_io_operations;
 		value = len;
 	}
 	return value;
@@ -1956,12 +1874,14 @@
 	return value;
 }
 
-static const struct file_operations dev_init_operations = {
+static const struct file_operations ep0_operations = {
 	.llseek =	no_llseek,
 
 	.open =		dev_open,
+	.read =		ep0_read,
 	.write =	dev_config,
 	.fasync =	ep0_fasync,
+	.poll =		ep0_poll,
 	.unlocked_ioctl = dev_ioctl,
 	.release =	dev_release,
 };
@@ -2077,7 +1997,7 @@
 		goto Enomem;
 
 	dev->sb = sb;
-	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations);
+	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
 	if (!dev->dentry) {
 		put_dev(dev);
 		goto Enomem;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7f76c8a..fd53c9e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -37,6 +37,9 @@
 
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
+#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
+#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -133,6 +136,12 @@
 		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
 	}
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
+		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+	}
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
 			pdev->device == PCI_DEVICE_ID_EJ168) {
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -159,6 +168,21 @@
 				"QUIRK: Resetting on resume");
 }
 
+/*
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct xhci_hcd *xhci)
+{
+	u32 val;
+	void __iomem *reg;
+
+	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+	val = readl(reg);
+	writel(val | BIT(28), reg);
+	readl(reg);
+}
+
 /* called during probe() after chip reset completes */
 static int xhci_pci_setup(struct usb_hcd *hcd)
 {
@@ -283,6 +307,9 @@
 	if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
 		pdev->no_d3cold = true;
 
+	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+		xhci_pme_quirk(xhci);
+
 	return xhci_suspend(xhci, do_wakeup);
 }
 
@@ -313,6 +340,9 @@
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
 		usb_enable_intel_xhci_ports(pdev);
 
+	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+		xhci_pme_quirk(xhci);
+
 	retval = xhci_resume(xhci, hibernated);
 	return retval;
 }
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 08d402b..0e11d61 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -83,16 +83,6 @@
 	if (irq < 0)
 		return -ENODEV;
 
-
-	if (of_device_is_compatible(pdev->dev.of_node,
-				    "marvell,armada-375-xhci") ||
-	    of_device_is_compatible(pdev->dev.of_node,
-				    "marvell,armada-380-xhci")) {
-		ret = xhci_mvebu_mbus_init_quirk(pdev);
-		if (ret)
-			return ret;
-	}
-
 	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
 	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret)
@@ -127,6 +117,15 @@
 			goto put_hcd;
 	}
 
+	if (of_device_is_compatible(pdev->dev.of_node,
+				    "marvell,armada-375-xhci") ||
+	    of_device_is_compatible(pdev->dev.of_node,
+				    "marvell,armada-380-xhci")) {
+		ret = xhci_mvebu_mbus_init_quirk(pdev);
+		if (ret)
+			goto disable_clk;
+	}
+
 	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
 	if (ret)
 		goto disable_clk;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 88da8d6..5fb66db 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1729,7 +1729,7 @@
 	if (!command)
 		return;
 
-	ep->ep_state |= EP_HALTED;
+	ep->ep_state |= EP_HALTED | EP_RECENTLY_HALTED;
 	ep->stopped_stream = stream_id;
 
 	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
@@ -1946,7 +1946,7 @@
 	if (event_trb != ep_ring->dequeue) {
 		/* The event was for the status stage */
 		if (event_trb == td->last_trb) {
-			if (td->urb->actual_length != 0) {
+			if (td->urb_length_set) {
 				/* Don't overwrite a previously set error code
 				 */
 				if ((*status == -EINPROGRESS || *status == 0) &&
@@ -1960,7 +1960,13 @@
 					td->urb->transfer_buffer_length;
 			}
 		} else {
-		/* Maybe the event was for the data stage? */
+			/*
+			 * Maybe the event was for the data stage? If so, update
+			 * the actual_length of the URB now and flag it as set,
+			 * so that it is not overwritten in the event for the
+			 * last TRB.
+			 */
+			td->urb_length_set = true;
 			td->urb->actual_length =
 				td->urb->transfer_buffer_length -
 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
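
The urb_length_set flag added above replaces the old heuristic of treating a
non-zero actual_length as "already filled in by the data stage".  A minimal
stand-alone sketch of the failure mode that heuristic has when the data stage
legitimately transfers zero bytes (all names below are illustrative, not xHCI
driver code):

#include <stdbool.h>
#include <stdio.h>

struct toy_td {
	unsigned int actual_length;
	bool length_set;
};

static void data_stage_event(struct toy_td *td, unsigned int transferred)
{
	td->actual_length = transferred;	/* may legitimately be 0 */
	td->length_set = true;
}

static void status_stage_event(struct toy_td *td, unsigned int requested)
{
	if (!td->length_set)		/* not: td->actual_length != 0 */
		td->actual_length = requested;
}

int main(void)
{
	struct toy_td td = { 0 };

	data_stage_event(&td, 0);	/* data stage moved 0 bytes */
	status_stage_event(&td, 512);
	printf("reported length: %u\n", td.actual_length);	/* 0, not 512 */
	return 0;
}
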
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ec8ac16..b06d1a5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1338,6 +1338,12 @@
 		goto exit;
 	}
 
+	/* Reject urb if endpoint is in soft reset, queue must stay empty */
+	if (xhci->devs[slot_id]->eps[ep_index].ep_state & EP_CONFIG_PENDING) {
+		xhci_warn(xhci, "Can't enqueue URB while ep is in soft reset\n");
+		ret = -EINVAL;
+	}
+
 	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
 		size = urb->number_of_packets;
 	else
@@ -2948,23 +2954,36 @@
 	}
 }
 
-/* Called when clearing halted device. The core should have sent the control
+/* Called after clearing a halted device. USB core should have sent the control
  * message to clear the device halt condition. The host side of the halt should
- * already be cleared with a reset endpoint command issued when the STALL tx
- * event was received.
- *
- * Context: in_interrupt
+ * already be cleared with a reset endpoint command issued immediately when the
+ * STALL tx event was received.
  */
 
 void xhci_endpoint_reset(struct usb_hcd *hcd,
 		struct usb_host_endpoint *ep)
 {
 	struct xhci_hcd *xhci;
+	struct usb_device *udev;
+	struct xhci_virt_device *virt_dev;
+	struct xhci_virt_ep *virt_ep;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_command *command;
+	unsigned int ep_index, ep_state;
+	unsigned long flags;
+	u32 ep_flag;
 
 	xhci = hcd_to_xhci(hcd);
+	udev = (struct usb_device *) ep->hcpriv;
+	if (!ep->hcpriv)
+		return;
+	virt_dev = xhci->devs[udev->slot_id];
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	virt_ep = &virt_dev->eps[ep_index];
+	ep_state = virt_ep->ep_state;
 
 	/*
-	 * We might need to implement the config ep cmd in xhci 4.8.1 note:
+	 * Implement the config ep command described in the xhci 4.6.8 additional note:
 	 * The Reset Endpoint Command may only be issued to endpoints in the
 	 * Halted state. If software wishes reset the Data Toggle or Sequence
 	 * Number of an endpoint that isn't in the Halted state, then software
@@ -2972,9 +2991,72 @@
 	 * for the target endpoint. that is in the Stopped state.
 	 */
 
-	/* For now just print debug to follow the situation */
-	xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
-		 ep->desc.bEndpointAddress);
+	if (ep_state & SET_DEQ_PENDING || ep_state & EP_RECENTLY_HALTED) {
+		virt_ep->ep_state &= ~EP_RECENTLY_HALTED;
+		xhci_dbg(xhci, "ep recently halted, no toggle reset needed\n");
+		return;
+	}
+
+	/* Only interrupt and bulk endpoints use the data toggle; see USB 2.0 spec 5.5.4 */
+	if (usb_endpoint_xfer_control(&ep->desc) ||
+	    usb_endpoint_xfer_isoc(&ep->desc))
+		return;
+
+	ep_flag = xhci_get_endpoint_flag(&ep->desc);
+
+	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
+		return;
+
+	command = xhci_alloc_command(xhci, true, true, GFP_NOWAIT);
+	if (!command) {
+		xhci_err(xhci, "Could not allocate xHCI command structure.\n");
+		return;
+	}
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	/* block ringing ep doorbell */
+	virt_ep->ep_state |= EP_CONFIG_PENDING;
+
+	/*
+	 * Make sure endpoint ring is empty before resetting the toggle/seq.
+	 * The driver is required to synchronously cancel all transfer requests.
+	 *
+	 * xhci 4.6.6 says we can issue a configure endpoint command on a
+	 * running endpoint ring as long as it's idle (queue empty)
+	 */
+
+	if (!list_empty(&virt_ep->ring->td_list)) {
+		dev_err(&udev->dev, "EP not empty, refuse reset\n");
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto cleanup;
+	}
+
+	xhci_dbg(xhci, "Reset toggle/seq for slot %d, ep_index: %d\n",
+		 udev->slot_id, ep_index);
+
+	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
+	if (!ctrl_ctx) {
+		xhci_err(xhci, "Could not get input context, bad type. virt_dev: %p, in_ctx %p\n",
+			 virt_dev, virt_dev->in_ctx);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto cleanup;
+	}
+	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
+					   virt_dev->out_ctx, ctrl_ctx,
+					   ep_flag, ep_flag);
+	xhci_endpoint_copy(xhci, command->in_ctx, virt_dev->out_ctx, ep_index);
+
+	xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma,
+				     udev->slot_id, false);
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	wait_for_completion(command->completion);
+
+cleanup:
+	virt_ep->ep_state &= ~EP_CONFIG_PENDING;
+	xhci_free_command(xhci, command);
 }
 
 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 9745147..265ab17 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1,3 +1,4 @@
+
 /*
  * xHCI host controller driver
  *
@@ -88,9 +89,10 @@
 #define HCS_IST(p)		(((p) >> 0) & 0xf)
 /* bits 4:7, max number of Event Ring segments */
 #define HCS_ERST_MAX(p)		(((p) >> 4) & 0xf)
+/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
 /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
-/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
-#define HCS_MAX_SCRATCHPAD(p)   (((p) >> 27) & 0x1f)
+/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p)   ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
 
 /* HCSPARAMS3 - hcs_params3 - bitmasks */
 /* bits 0:7, Max U1 to U0 latency for the roothub ports */
@@ -863,6 +865,8 @@
 #define EP_HAS_STREAMS		(1 << 4)
 /* Transitioning the endpoint to not using streams, don't enqueue URBs */
 #define EP_GETTING_NO_STREAMS	(1 << 5)
+#define EP_RECENTLY_HALTED	(1 << 6)
+#define EP_CONFIG_PENDING	(1 << 7)
 	/* ----  Related to URB cancellation ---- */
 	struct list_head	cancelled_td_list;
 	struct xhci_td		*stopped_td;
@@ -1288,6 +1292,8 @@
 	struct xhci_segment	*start_seg;
 	union xhci_trb		*first_trb;
 	union xhci_trb		*last_trb;
+	/* actual_length of the URB has already been set */
+	bool			urb_length_set;
 };
 
 /* xHCI command default timeout value */
@@ -1560,6 +1566,7 @@
 #define XHCI_SPURIOUS_WAKEUP	(1 << 18)
 /* For controllers with a broken beyond repair streams implementation */
 #define XHCI_BROKEN_STREAMS	(1 << 19)
+#define XHCI_PME_STUCK_QUIRK	(1 << 20)
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */
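
The HCS_MAX_SCRATCHPAD() change above assembles a 10-bit scratchpad count from
two 5-bit fields of HCSPARAMS2 (high bits 21:25, low bits 27:31) instead of
reading only the low field.  A small userspace sketch of the arithmetic, using
an arbitrary example register value:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the updated macro: hi 5 bits at 21:25, lo 5 bits at 27:31. */
#define HCS_MAX_SCRATCHPAD(p)	((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))

int main(void)
{
	/* Example HCSPARAMS2: hi field = 1, lo field = 4 -> 32 + 4 = 36. */
	uint32_t hcs_params2 = (1u << 21) | (4u << 27);

	printf("scratchpad buffers = %u\n",
	       (unsigned int)HCS_MAX_SCRATCHPAD(hcs_params2));
	return 0;
}
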
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index eba9b82..3cb98b1 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1274,7 +1274,7 @@
 	for (slot = 0; slot < 32; slot++)
 		if (priv->atl_slots[slot].qh && time_after(jiffies,
 					priv->atl_slots[slot].timestamp +
-					SLOT_TIMEOUT * HZ / 1000)) {
+					msecs_to_jiffies(SLOT_TIMEOUT))) {
 			ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
 			if (!FROM_DW0_VALID(ptd.dw0) &&
 					!FROM_DW3_ACTIVE(ptd.dw3))
@@ -1286,7 +1286,7 @@
 
 	spin_unlock_irqrestore(&priv->lock, spinflags);
 
-	errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
+	errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
 	add_timer(&errata2_timer);
 }
 
@@ -1336,7 +1336,7 @@
 		return retval;
 
 	setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd);
-	errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
+	errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
 	add_timer(&errata2_timer);
 
 	chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
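
The timer conversions above replace the open-coded "msecs * HZ / 1000" with
msecs_to_jiffies(), which rounds up rather than truncating and stays correct
when HZ does not divide 1000 evenly.  A userspace sketch of the difference;
the HZ value and millisecond delay below are purely illustrative, not the
driver's SLOT_* constants:

#include <stdio.h>

#define HZ 250			/* illustrative kernel tick rate */

static unsigned long open_coded(unsigned int ms)
{
	return ms * HZ / 1000;		/* truncates toward zero */
}

static unsigned long rounded_up(unsigned int ms)
{
	return (ms * HZ + 999) / 1000;	/* rounds up, as msecs_to_jiffies() does */
}

int main(void)
{
	unsigned int ms = 6;

	printf("open-coded:  %lu jiffies\n", open_coded(ms));	/* 1 (4 ms) */
	printf("rounded up:  %lu jiffies\n", rounded_up(ms));	/* 2 (8 ms) */
	return 0;
}
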
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index e6f4cbf..067920f 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1969,10 +1969,6 @@
 		goto fail0;
 	}
 
-	pm_runtime_use_autosuspend(musb->controller);
-	pm_runtime_set_autosuspend_delay(musb->controller, 200);
-	pm_runtime_enable(musb->controller);
-
 	spin_lock_init(&musb->lock);
 	musb->board_set_power = plat->set_power;
 	musb->min_power = plat->min_power;
@@ -1991,6 +1987,12 @@
 	musb_readl = musb_default_readl;
 	musb_writel = musb_default_writel;
 
+	/* We need musb_read/write functions initialized for PM */
+	pm_runtime_use_autosuspend(musb->controller);
+	pm_runtime_set_autosuspend_delay(musb->controller, 200);
+	pm_runtime_irq_safe(musb->controller);
+	pm_runtime_enable(musb->controller);
+
 	/* The musb_platform_init() call:
 	 *   - adjusts musb->mregs
 	 *   - sets the musb->isr
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 53bd0e7..a900c98 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -457,12 +457,27 @@
 	if (IS_ERR(musb->xceiv))
 		return PTR_ERR(musb->xceiv);
 
+	musb->phy = devm_phy_get(dev->parent, "usb2-phy");
+
 	/* Returns zero if e.g. not clocked */
 	rev = dsps_readl(reg_base, wrp->revision);
 	if (!rev)
 		return -ENODEV;
 
 	usb_phy_init(musb->xceiv);
+	if (IS_ERR(musb->phy))  {
+		musb->phy = NULL;
+	} else {
+		ret = phy_init(musb->phy);
+		if (ret < 0)
+			return ret;
+		ret = phy_power_on(musb->phy);
+		if (ret) {
+			phy_exit(musb->phy);
+			return ret;
+		}
+	}
+
 	setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
 
 	/* Reset the musb */
@@ -502,6 +517,8 @@
 
 	del_timer_sync(&glue->timer);
 	usb_phy_shutdown(musb->xceiv);
+	phy_power_off(musb->phy);
+	phy_exit(musb->phy);
 	debugfs_remove_recursive(glue->dbgfs_root);
 
 	return 0;
@@ -610,7 +627,7 @@
 	struct device *dev = musb->controller;
 	struct dsps_glue *glue = dev_get_drvdata(dev->parent);
 	const struct dsps_musb_wrapper *wrp = glue->wrp;
-	int session_restart = 0;
+	int session_restart = 0, error;
 
 	if (glue->sw_babble_enabled)
 		session_restart = sw_babble_control(musb);
@@ -624,8 +641,14 @@
 		dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset));
 		usleep_range(100, 200);
 		usb_phy_shutdown(musb->xceiv);
+		error = phy_power_off(musb->phy);
+		if (error)
+			dev_err(dev, "phy shutdown failed: %i\n", error);
 		usleep_range(100, 200);
 		usb_phy_init(musb->xceiv);
+		error = phy_power_on(musb->phy);
+		if (error)
+			dev_err(dev, "phy powerup failed: %i\n", error);
 		session_restart = 1;
 	}
 
@@ -687,7 +710,7 @@
 	struct musb_hdrc_config	*config;
 	struct platform_device *musb;
 	struct device_node *dn = parent->dev.of_node;
-	int ret;
+	int ret, val;
 
 	memset(resources, 0, sizeof(resources));
 	res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc");
@@ -739,7 +762,10 @@
 	pdata.mode = get_musb_port_mode(dev);
 	/* DT keeps this entry in mA, musb expects it as per USB spec */
 	pdata.power = get_int_prop(dn, "mentor,power") / 2;
-	config->multipoint = of_property_read_bool(dn, "mentor,multipoint");
+
+	ret = of_property_read_u32(dn, "mentor,multipoint", &val);
+	if (!ret && val)
+		config->multipoint = true;
 
 	ret = platform_device_add_data(musb, &pdata, sizeof(pdata));
 	if (ret) {
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 883a9ad..c3d5fc9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2613,7 +2613,7 @@
 	.description		= "musb-hcd",
 	.product_desc		= "MUSB HDRC host driver",
 	.hcd_priv_size		= sizeof(struct musb *),
-	.flags			= HCD_USB2 | HCD_MEMORY,
+	.flags			= HCD_USB2 | HCD_MEMORY | HCD_BH,
 
 	/* not using irq handler or reset hooks from usbcore, since
 	 * those must be shared with peripheral code for OTG configs
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 763649e..cc752d8 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -516,7 +516,7 @@
 	struct omap2430_glue		*glue;
 	struct device_node		*np = pdev->dev.of_node;
 	struct musb_hdrc_config		*config;
-	int				ret = -ENOMEM;
+	int				ret = -ENOMEM, val;
 
 	glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
 	if (!glue)
@@ -559,7 +559,10 @@
 		of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps);
 		of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
 		of_property_read_u32(np, "power", (u32 *)&pdata->power);
-		config->multipoint = of_property_read_bool(np, "multipoint");
+
+		ret = of_property_read_u32(np, "multipoint", &val);
+		if (!ret && val)
+			config->multipoint = true;
 
 		pdata->board_data	= data;
 		pdata->config		= config;
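
Both glue layers above stop reading the multipoint property with
of_property_read_bool(): that helper only tests for the property's presence,
so a device tree entry written as <0> would still enable multipoint.  Reading
the value with of_property_read_u32() honors an explicit zero.  A hedged
kernel-style sketch of the pattern (the function name is illustrative):

#include <linux/of.h>
#include <linux/types.h>

/* Return true only if the property exists and carries a non-zero value. */
static bool dt_multipoint_enabled(struct device_node *np, const char *prop)
{
	u32 val;

	if (of_property_read_u32(np, prop, &val))
		return false;	/* property absent or not a readable u32 cell */

	return val != 0;
}
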
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig
index de83b9d..ebc99ee 100644
--- a/drivers/usb/renesas_usbhs/Kconfig
+++ b/drivers/usb/renesas_usbhs/Kconfig
@@ -6,6 +6,7 @@
 	tristate 'Renesas USBHS controller'
 	depends on USB_GADGET
 	depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
+	depends on EXTCON || !EXTCON # if EXTCON=m, USBHS cannot be built-in
 	default n
 	help
 	  Renesas USBHS is a discrete USB host and peripheral controller chip
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 9374bd2..8936a83 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -38,56 +38,51 @@
 	return 0;
 }
 
-static ssize_t port_number_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct usb_serial_port *port = to_usb_serial_port(dev);
-
-	return sprintf(buf, "%d\n", port->port_number);
-}
-static DEVICE_ATTR_RO(port_number);
-
 static int usb_serial_device_probe(struct device *dev)
 {
 	struct usb_serial_driver *driver;
 	struct usb_serial_port *port;
+	struct device *tty_dev;
 	int retval = 0;
 	int minor;
 
 	port = to_usb_serial_port(dev);
-	if (!port) {
-		retval = -ENODEV;
-		goto exit;
-	}
+	if (!port)
+		return -ENODEV;
 
 	/* make sure suspend/resume doesn't race against port_probe */
 	retval = usb_autopm_get_interface(port->serial->interface);
 	if (retval)
-		goto exit;
+		return retval;
 
 	driver = port->serial->type;
 	if (driver->port_probe) {
 		retval = driver->port_probe(port);
 		if (retval)
-			goto exit_with_autopm;
-	}
-
-	retval = device_create_file(dev, &dev_attr_port_number);
-	if (retval) {
-		if (driver->port_remove)
-			retval = driver->port_remove(port);
-		goto exit_with_autopm;
+			goto err_autopm_put;
 	}
 
 	minor = port->minor;
-	tty_register_device(usb_serial_tty_driver, minor, dev);
+	tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev);
+	if (IS_ERR(tty_dev)) {
+		retval = PTR_ERR(tty_dev);
+		goto err_port_remove;
+	}
+
+	usb_autopm_put_interface(port->serial->interface);
+
 	dev_info(&port->serial->dev->dev,
 		 "%s converter now attached to ttyUSB%d\n",
 		 driver->description, minor);
 
-exit_with_autopm:
+	return 0;
+
+err_port_remove:
+	if (driver->port_remove)
+		driver->port_remove(port);
+err_autopm_put:
 	usb_autopm_put_interface(port->serial->interface);
-exit:
+
 	return retval;
 }
 
@@ -114,8 +109,6 @@
 	minor = port->minor;
 	tty_unregister_device(usb_serial_tty_driver, minor);
 
-	device_remove_file(&port->dev, &dev_attr_port_number);
-
 	driver = port->serial->type;
 	if (driver->port_remove)
 		retval = driver->port_remove(port);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2d72aa3..ede4f5f 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -84,6 +84,10 @@
 	u8 line_status; /* active status of modem control inputs */
 };
 
+static void ch341_set_termios(struct tty_struct *tty,
+			      struct usb_serial_port *port,
+			      struct ktermios *old_termios);
+
 static int ch341_control_out(struct usb_device *dev, u8 request,
 			     u16 value, u16 index)
 {
@@ -309,19 +313,12 @@
 	struct ch341_private *priv = usb_get_serial_port_data(port);
 	int r;
 
-	priv->baud_rate = DEFAULT_BAUD_RATE;
-
 	r = ch341_configure(serial->dev, priv);
 	if (r)
 		goto out;
 
-	r = ch341_set_handshake(serial->dev, priv->line_control);
-	if (r)
-		goto out;
-
-	r = ch341_set_baudrate(serial->dev, priv);
-	if (r)
-		goto out;
+	if (tty)
+		ch341_set_termios(tty, port, NULL);
 
 	dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__);
 	r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 29fa1c3..3806e70 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/tty.h>
 #include <linux/console.h>
@@ -144,6 +145,7 @@
 			init_ldsem(&tty->ldisc_sem);
 			INIT_LIST_HEAD(&tty->tty_files);
 			kref_get(&tty->driver->kref);
+			__module_get(tty->driver->owner);
 			tty->ops = &usb_console_fake_tty_ops;
 			if (tty_init_termios(tty)) {
 				retval = -ENOMEM;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f40c856..84ce2d7 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -147,6 +147,8 @@
 	{ USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
 	{ USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
 	{ USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
+	{ USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */
+	{ USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */
 	{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
 	{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
 	{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1ebb351..3086dec 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -799,6 +799,8 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
+	{ USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -978,6 +980,23 @@
 	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
 	/* GE Healthcare devices */
 	{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
+	/* Active Research (Actisense) devices */
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) },
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
+	{ USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
+	{ USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
+	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) },
+	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) },
+	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) },
+	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
+	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
+	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
 	{ }					/* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e52409c..56b1b55 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -38,6 +38,9 @@
 
 #define FTDI_LUMEL_PD12_PID	0x6002
 
+/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
+#define CYBER_CORTEX_AV_PID	0x8698
+
 /*
  * Marvell OpenRD Base, Client
  * http://www.open-rd.org
@@ -1438,3 +1441,23 @@
  */
 #define GE_HEALTHCARE_VID		0x1901
 #define GE_HEALTHCARE_NEMO_TRACKER_PID	0x0015
+
+/*
+ * Active Research (Actisense) devices
+ */
+#define ACTISENSE_NDC_PID		0xD9A8 /* NDC USB Serial Adapter */
+#define ACTISENSE_USG_PID		0xD9A9 /* USG USB Serial Adapter */
+#define ACTISENSE_NGT_PID		0xD9AA /* NGT NMEA2000 Interface */
+#define ACTISENSE_NGW_PID		0xD9AB /* NGW NMEA2000 Gateway */
+#define ACTISENSE_D9AC_PID		0xD9AC /* Actisense Reserved */
+#define ACTISENSE_D9AD_PID		0xD9AD /* Actisense Reserved */
+#define ACTISENSE_D9AE_PID		0xD9AE /* Actisense Reserved */
+#define ACTISENSE_D9AF_PID		0xD9AF /* Actisense Reserved */
+#define CHETCO_SEAGAUGE_PID		0xA548 /* SeaGauge USB Adapter */
+#define CHETCO_SEASWITCH_PID		0xA549 /* SeaSwitch USB Adapter */
+#define CHETCO_SEASMART_NMEA2000_PID	0xA54A /* SeaSmart NMEA2000 Gateway */
+#define CHETCO_SEASMART_ETHERNET_PID	0xA54B /* SeaSmart Ethernet Gateway */
+#define CHETCO_SEASMART_WIFI_PID	0xA5AC /* SeaSmart Wifi Gateway */
+#define CHETCO_SEASMART_DISPLAY_PID	0xA5AD /* SeaSmart NMEA2000 Display */
+#define CHETCO_SEASMART_LITE_PID	0xA5AE /* SeaSmart Lite USB Adapter */
+#define CHETCO_SEASMART_ANALOG_PID	0xA5AF /* SeaSmart Analog Adapter */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index ccf1df7..54e170d 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -258,7 +258,8 @@
 	 * character or at least one jiffy.
 	 */
 	period = max_t(unsigned long, (10 * HZ / bps), 1);
-	period = min_t(unsigned long, period, timeout);
+	if (timeout)
+		period = min_t(unsigned long, period, timeout);
 
 	dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
 					__func__, jiffies_to_msecs(timeout),
@@ -268,7 +269,7 @@
 		schedule_timeout_interruptible(period);
 		if (signal_pending(current))
 			break;
-		if (time_after(jiffies, expire))
+		if (timeout && time_after(jiffies, expire))
 			break;
 	}
 }
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index ab1d690..460a406 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1284,7 +1284,8 @@
 	}
 
 	/* Initial port termios */
-	mxuport_set_termios(tty, port, NULL);
+	if (tty)
+		mxuport_set_termios(tty, port, NULL);
 
 	/*
 	 * TODO: use RQ_VENDOR_GET_MSR, once we know what it
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 0f872e6..829604d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -132,6 +132,7 @@
 #define UART_OVERRUN_ERROR		0x40
 #define UART_CTS			0x80
 
+static void pl2303_set_break(struct usb_serial_port *port, bool enable);
 
 enum pl2303_type {
 	TYPE_01,	/* Type 0 and 1 (difference unknown) */
@@ -615,6 +616,7 @@
 {
 	usb_serial_generic_close(port);
 	usb_kill_urb(port->interrupt_in_urb);
+	pl2303_set_break(port, false);
 }
 
 static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
@@ -741,17 +743,16 @@
 	return -ENOIOCTLCMD;
 }
 
-static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
+static void pl2303_set_break(struct usb_serial_port *port, bool enable)
 {
-	struct usb_serial_port *port = tty->driver_data;
 	struct usb_serial *serial = port->serial;
 	u16 state;
 	int result;
 
-	if (break_state == 0)
-		state = BREAK_OFF;
-	else
+	if (enable)
 		state = BREAK_ON;
+	else
+		state = BREAK_OFF;
 
 	dev_dbg(&port->dev, "%s - turning break %s\n", __func__,
 			state == BREAK_OFF ? "off" : "on");
@@ -763,6 +764,13 @@
 		dev_err(&port->dev, "error sending break = %d\n", result);
 }
 
+static void pl2303_break_ctl(struct tty_struct *tty, int state)
+{
+	struct usb_serial_port *port = tty->driver_data;
+
+	pl2303_set_break(port, state);
+}
+
 static void pl2303_update_line_status(struct usb_serial_port *port,
 				      unsigned char *data,
 				      unsigned int actual_length)
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 475723c..529066b 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -687,6 +687,21 @@
 		drv->dtr_rts(p, on);
 }
 
+static ssize_t port_number_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct usb_serial_port *port = to_usb_serial_port(dev);
+
+	return sprintf(buf, "%u\n", port->port_number);
+}
+static DEVICE_ATTR_RO(port_number);
+
+static struct attribute *usb_serial_port_attrs[] = {
+	&dev_attr_port_number.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(usb_serial_port);
+
 static const struct tty_port_operations serial_port_ops = {
 	.carrier_raised		= serial_port_carrier_raised,
 	.dtr_rts		= serial_port_dtr_rts,
@@ -902,6 +917,7 @@
 		port->dev.driver = NULL;
 		port->dev.bus = &usb_serial_bus_type;
 		port->dev.release = &usb_serial_port_release;
+		port->dev.groups = usb_serial_port_groups;
 		device_initialize(&port->dev);
 	}
 
@@ -940,8 +956,9 @@
 		port = serial->port[i];
 		if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
 			goto probe_error;
-		buffer_size = max_t(int, serial->type->bulk_out_size,
-						usb_endpoint_maxp(endpoint));
+		buffer_size = serial->type->bulk_out_size;
+		if (!buffer_size)
+			buffer_size = usb_endpoint_maxp(endpoint);
 		port->bulk_out_size = buffer_size;
 		port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
 
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index dbc00e5..8257042 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -113,6 +113,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_NO_ATA_1X),
 
+/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
+UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
+		"JMicron",
+		"JMS539",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_REPORT_OPCODES),
+
 /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
 UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
 		"JMicron",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index d468d02..5600c33 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -889,6 +889,12 @@
 	    !(us->fflags & US_FL_SCM_MULT_TARG)) {
 		mutex_lock(&us->dev_mutex);
 		us->max_lun = usb_stor_Bulk_max_lun(us);
+		/*
+		 * Allow proper scanning of devices that present more than 8 LUNs,
+		 * while not affecting other devices that may need the previous behavior.
+		 */
+		if (us->max_lun >= 8)
+			us_to_host(us)->max_lun = us->max_lun+1;
 		mutex_unlock(&us->dev_mutex);
 	}
 	scsi_scan_host(us_to_host(us));
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 7cc0122..f8a1863 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -239,9 +239,12 @@
 
 			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
 		}
-	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX)
+	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
 		if (pci_is_pcie(vdev->pdev))
 			return 1;
+	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
+		return 1;
+	}
 
 	return 0;
 }
@@ -464,6 +467,7 @@
 
 		switch (info.index) {
 		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+		case VFIO_PCI_REQ_IRQ_INDEX:
 			break;
 		case VFIO_PCI_ERR_IRQ_INDEX:
 			if (pci_is_pcie(vdev->pdev))
@@ -828,6 +832,20 @@
 			       req_len, vma->vm_page_prot);
 }
 
+static void vfio_pci_request(void *device_data, unsigned int count)
+{
+	struct vfio_pci_device *vdev = device_data;
+
+	mutex_lock(&vdev->igate);
+
+	if (vdev->req_trigger) {
+		dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
+		eventfd_signal(vdev->req_trigger, 1);
+	}
+
+	mutex_unlock(&vdev->igate);
+}
+
 static const struct vfio_device_ops vfio_pci_ops = {
 	.name		= "vfio-pci",
 	.open		= vfio_pci_open,
@@ -836,6 +854,7 @@
 	.read		= vfio_pci_read,
 	.write		= vfio_pci_write,
 	.mmap		= vfio_pci_mmap,
+	.request	= vfio_pci_request,
 };
 
 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index e8d695b..2027a27 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -763,46 +763,70 @@
 	return 0;
 }
 
-static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
-				    unsigned index, unsigned start,
-				    unsigned count, uint32_t flags, void *data)
+static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
+					   uint32_t flags, void *data)
 {
 	int32_t fd = *(int32_t *)data;
 
-	if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
-	    !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
+	if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
 		return -EINVAL;
 
 	/* DATA_NONE/DATA_BOOL enables loopback testing */
 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
-		if (vdev->err_trigger)
-			eventfd_signal(vdev->err_trigger, 1);
+		if (*ctx)
+			eventfd_signal(*ctx, 1);
 		return 0;
 	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
 		uint8_t trigger = *(uint8_t *)data;
-		if (trigger && vdev->err_trigger)
-			eventfd_signal(vdev->err_trigger, 1);
+		if (trigger && *ctx)
+			eventfd_signal(*ctx, 1);
 		return 0;
 	}
 
 	/* Handle SET_DATA_EVENTFD */
 	if (fd == -1) {
-		if (vdev->err_trigger)
-			eventfd_ctx_put(vdev->err_trigger);
-		vdev->err_trigger = NULL;
+		if (*ctx)
+			eventfd_ctx_put(*ctx);
+		*ctx = NULL;
 		return 0;
 	} else if (fd >= 0) {
 		struct eventfd_ctx *efdctx;
 		efdctx = eventfd_ctx_fdget(fd);
 		if (IS_ERR(efdctx))
 			return PTR_ERR(efdctx);
-		if (vdev->err_trigger)
-			eventfd_ctx_put(vdev->err_trigger);
-		vdev->err_trigger = efdctx;
+		if (*ctx)
+			eventfd_ctx_put(*ctx);
+		*ctx = efdctx;
 		return 0;
 	} else
 		return -EINVAL;
 }
+
+static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
+				    unsigned index, unsigned start,
+				    unsigned count, uint32_t flags, void *data)
+{
+	if (index != VFIO_PCI_ERR_IRQ_INDEX)
+		return -EINVAL;
+
+	/*
+	 * We should sanitize start & count, but that wasn't caught
+	 * originally, so this IRQ index must forever ignore them :-(
+	 */
+
+	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+}
+
+static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
+				    unsigned index, unsigned start,
+				    unsigned count, uint32_t flags, void *data)
+{
+	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+		return -EINVAL;
+
+	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+}
+
 int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
 			    unsigned index, unsigned start, unsigned count,
 			    void *data)
@@ -844,6 +868,14 @@
 				func = vfio_pci_set_err_trigger;
 			break;
 		}
+		break;
+	case VFIO_PCI_REQ_IRQ_INDEX:
+		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+		case VFIO_IRQ_SET_ACTION_TRIGGER:
+			func = vfio_pci_set_req_trigger;
+			break;
+		}
+		break;
 	}
 
 	if (!func)
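
For the consumer side of the new request interrupt index: a minimal userspace
sketch that registers an eventfd so an application is told when vfio-pci wants
the device released.  It assumes the uapi added alongside this series
(VFIO_PCI_REQ_IRQ_INDEX) and an already-open VFIO device file descriptor; the
helper name is illustrative and error handling is trimmed:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Arm an eventfd on the request index; poll()/read() it from the main loop. */
static int arm_req_eventfd(int device_fd)
{
	struct vfio_irq_set *set;
	size_t sz = sizeof(*set) + sizeof(int32_t);
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;

	set = calloc(1, sz);
	if (!set) {
		close(efd);
		return -1;
	}

	set->argsz = sz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_REQ_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(int32_t));

	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set) < 0) {
		close(efd);
		efd = -1;
	}

	free(set);
	return efd;	/* a successful read() here means "release the device" */
}
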
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 671c17a..c9f9b32 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -58,6 +58,7 @@
 	struct pci_saved_state	*pci_saved_state;
 	int			refcnt;
 	struct eventfd_ctx	*err_trigger;
+	struct eventfd_ctx	*req_trigger;
 };
 
 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index f018d8d..4cde855 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -63,6 +63,11 @@
 	void				*iommu_data;
 };
 
+struct vfio_unbound_dev {
+	struct device			*dev;
+	struct list_head		unbound_next;
+};
+
 struct vfio_group {
 	struct kref			kref;
 	int				minor;
@@ -75,6 +80,8 @@
 	struct notifier_block		nb;
 	struct list_head		vfio_next;
 	struct list_head		container_next;
+	struct list_head		unbound_list;
+	struct mutex			unbound_lock;
 	atomic_t			opened;
 };
 
@@ -204,6 +211,8 @@
 	kref_init(&group->kref);
 	INIT_LIST_HEAD(&group->device_list);
 	mutex_init(&group->device_lock);
+	INIT_LIST_HEAD(&group->unbound_list);
+	mutex_init(&group->unbound_lock);
 	atomic_set(&group->container_users, 0);
 	atomic_set(&group->opened, 0);
 	group->iommu_group = iommu_group;
@@ -264,13 +273,22 @@
 static void vfio_group_release(struct kref *kref)
 {
 	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
+	struct vfio_unbound_dev *unbound, *tmp;
+	struct iommu_group *iommu_group = group->iommu_group;
 
 	WARN_ON(!list_empty(&group->device_list));
 
+	list_for_each_entry_safe(unbound, tmp,
+				 &group->unbound_list, unbound_next) {
+		list_del(&unbound->unbound_next);
+		kfree(unbound);
+	}
+
 	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
 	list_del(&group->vfio_next);
 	vfio_free_group_minor(group->minor);
 	vfio_group_unlock_and_free(group);
+	iommu_group_put(iommu_group);
 }
 
 static void vfio_group_put(struct vfio_group *group)
@@ -440,17 +458,36 @@
 }
 
 /*
- * A vfio group is viable for use by userspace if all devices are either
- * driver-less or bound to a vfio or whitelisted driver.  We test the
- * latter by the existence of a struct vfio_device matching the dev.
+ * A vfio group is viable for use by userspace if all devices are in
+ * one of the following states:
+ *  - driver-less
+ *  - bound to a vfio driver
+ *  - bound to a whitelisted driver
+ *
+ * We use two methods to determine whether a device is bound to a vfio
+ * driver.  The first is to test whether the device exists in the vfio
+ * group.  The second is to test if the device exists on the group
+ * unbound_list, indicating it's in the middle of transitioning from
+ * a vfio driver to driver-less.
  */
 static int vfio_dev_viable(struct device *dev, void *data)
 {
 	struct vfio_group *group = data;
 	struct vfio_device *device;
 	struct device_driver *drv = ACCESS_ONCE(dev->driver);
+	struct vfio_unbound_dev *unbound;
+	int ret = -EINVAL;
 
-	if (!drv || vfio_whitelisted_driver(drv))
+	mutex_lock(&group->unbound_lock);
+	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
+		if (dev == unbound->dev) {
+			ret = 0;
+			break;
+		}
+	}
+	mutex_unlock(&group->unbound_lock);
+
+	if (!ret || !drv || vfio_whitelisted_driver(drv))
 		return 0;
 
 	device = vfio_group_get_device(group, dev);
@@ -459,7 +496,7 @@
 		return 0;
 	}
 
-	return -EINVAL;
+	return ret;
 }
 
 /**
@@ -501,6 +538,7 @@
 {
 	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
 	struct device *dev = data;
+	struct vfio_unbound_dev *unbound;
 
 	/*
 	 * Need to go through a group_lock lookup to get a reference or we
@@ -550,6 +588,17 @@
 		 * stop the system to maintain isolation.  At a minimum, we'd
 		 * want a toggle to disable driver auto probe for this device.
 		 */
+
+		mutex_lock(&group->unbound_lock);
+		list_for_each_entry(unbound,
+				    &group->unbound_list, unbound_next) {
+			if (dev == unbound->dev) {
+				list_del(&unbound->unbound_next);
+				kfree(unbound);
+				break;
+			}
+		}
+		mutex_unlock(&group->unbound_lock);
 		break;
 	}
 
@@ -578,6 +627,12 @@
 			iommu_group_put(iommu_group);
 			return PTR_ERR(group);
 		}
+	} else {
+		/*
+		 * A found vfio_group already holds a reference to the
+		 * iommu_group.  A created vfio_group keeps the reference.
+		 */
+		iommu_group_put(iommu_group);
 	}
 
 	device = vfio_group_get_device(group, dev);
@@ -586,21 +641,19 @@
 		     dev_name(dev), iommu_group_id(iommu_group));
 		vfio_device_put(device);
 		vfio_group_put(group);
-		iommu_group_put(iommu_group);
 		return -EBUSY;
 	}
 
 	device = vfio_group_create_device(group, dev, ops, device_data);
 	if (IS_ERR(device)) {
 		vfio_group_put(group);
-		iommu_group_put(iommu_group);
 		return PTR_ERR(device);
 	}
 
 	/*
-	 * Added device holds reference to iommu_group and vfio_device
-	 * (which in turn holds reference to vfio_group).  Drop extra
-	 * group reference used while acquiring device.
+	 * Drop all but the vfio_device reference.  The vfio_device holds
+	 * a reference to the vfio_group, which holds a reference to the
+	 * iommu_group.
 	 */
 	vfio_group_put(group);
 
@@ -655,8 +708,9 @@
 {
 	struct vfio_device *device = dev_get_drvdata(dev);
 	struct vfio_group *group = device->group;
-	struct iommu_group *iommu_group = group->iommu_group;
 	void *device_data = device->device_data;
+	struct vfio_unbound_dev *unbound;
+	unsigned int i = 0;
 
 	/*
 	 * The group exists so long as we have a device reference.  Get
@@ -664,15 +718,50 @@
 	 */
 	vfio_group_get(group);
 
+	/*
+	 * When the device is removed from the group, the group suddenly
+	 * becomes non-viable; the device has a driver (until the unbind
+	 * completes), but it's not present in the group.  This is bad news
+	 * for any external users that need to re-acquire a group reference
+	 * in order to match and release their existing reference.  To
+	 * solve this, we track such devices on the unbound_list to bridge
+	 * the gap until they're fully unbound.
+	 */
+	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
+	if (unbound) {
+		unbound->dev = dev;
+		mutex_lock(&group->unbound_lock);
+		list_add(&unbound->unbound_next, &group->unbound_list);
+		mutex_unlock(&group->unbound_lock);
+	}
+	WARN_ON(!unbound);
+
 	vfio_device_put(device);
 
-	/* TODO send a signal to encourage this to be released */
-	wait_event(vfio.release_q, !vfio_dev_present(group, dev));
+	/*
+	 * If the device is still present in the group after the above
+	 * 'put', then it is in use and we need to request it from the
+	 * bus driver.  The driver may in turn need to request the
+	 * device from the user.  We send the request on an arbitrary
+	 * interval with counter to allow the driver to take escalating
+	 * measures to release the device if it has the ability to do so.
+	 */
+	do {
+		device = vfio_group_get_device(group, dev);
+		if (!device)
+			break;
+
+		if (device->ops->request)
+			device->ops->request(device_data, i++);
+
+		vfio_device_put(device);
+
+	} while (wait_event_interruptible_timeout(vfio.release_q,
+						  !vfio_dev_present(group, dev),
+						  HZ * 10) <= 0);
 
 	vfio_group_put(group);
 
-	iommu_group_put(iommu_group);
-
 	return device_data;
 }
 EXPORT_SYMBOL_GPL(vfio_del_group_dev);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4a9d666..57d8c37 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -66,6 +66,7 @@
 	struct list_head	next;
 	struct list_head	group_list;
 	int			prot;		/* IOMMU_CACHE */
+	bool			fgsp;		/* Fine-grained super pages */
 };
 
 struct vfio_dma {
@@ -264,6 +265,7 @@
 	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	bool lock_cap = capable(CAP_IPC_LOCK);
 	long ret, i;
+	bool rsvd;
 
 	if (!current->mm)
 		return -ENODEV;
@@ -272,10 +274,9 @@
 	if (ret)
 		return ret;
 
-	if (is_invalid_reserved_pfn(*pfn_base))
-		return 1;
+	rsvd = is_invalid_reserved_pfn(*pfn_base);
 
-	if (!lock_cap && current->mm->locked_vm + 1 > limit) {
+	if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
 		put_pfn(*pfn_base, prot);
 		pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
 			limit << PAGE_SHIFT);
@@ -283,7 +284,8 @@
 	}
 
 	if (unlikely(disable_hugepages)) {
-		vfio_lock_acct(1);
+		if (!rsvd)
+			vfio_lock_acct(1);
 		return 1;
 	}
 
@@ -295,12 +297,14 @@
 		if (ret)
 			break;
 
-		if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
+		if (pfn != *pfn_base + i ||
+		    rsvd != is_invalid_reserved_pfn(pfn)) {
 			put_pfn(pfn, prot);
 			break;
 		}
 
-		if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
+		if (!rsvd && !lock_cap &&
+		    current->mm->locked_vm + i + 1 > limit) {
 			put_pfn(pfn, prot);
 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
 				__func__, limit << PAGE_SHIFT);
@@ -308,7 +312,8 @@
 		}
 	}
 
-	vfio_lock_acct(i);
+	if (!rsvd)
+		vfio_lock_acct(i);
 
 	return i;
 }
@@ -346,12 +351,14 @@
 	domain = d = list_first_entry(&iommu->domain_list,
 				      struct vfio_domain, next);
 
-	list_for_each_entry_continue(d, &iommu->domain_list, next)
+	list_for_each_entry_continue(d, &iommu->domain_list, next) {
 		iommu_unmap(d->domain, dma->iova, dma->size);
+		cond_resched();
+	}
 
 	while (iova < end) {
-		size_t unmapped;
-		phys_addr_t phys;
+		size_t unmapped, len;
+		phys_addr_t phys, next;
 
 		phys = iommu_iova_to_phys(domain->domain, iova);
 		if (WARN_ON(!phys)) {
@@ -359,7 +366,19 @@
 			continue;
 		}
 
-		unmapped = iommu_unmap(domain->domain, iova, PAGE_SIZE);
+		/*
+		 * To optimize for fewer iommu_unmap() calls, each of which
+		 * may require hardware cache flushing, try to find the
+		 * largest contiguous physical memory chunk to unmap.
+		 */
+		for (len = PAGE_SIZE;
+		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
+			next = iommu_iova_to_phys(domain->domain, iova + len);
+			if (next != phys + len)
+				break;
+		}
+
+		unmapped = iommu_unmap(domain->domain, iova, len);
 		if (WARN_ON(!unmapped))
 			break;
 
@@ -367,6 +386,8 @@
 					     unmapped >> PAGE_SHIFT,
 					     dma->prot, false);
 		iova += unmapped;
+
+		cond_resched();
 	}
 
 	vfio_lock_acct(-unlocked);
@@ -511,6 +532,8 @@
 			    map_try_harder(d, iova, pfn, npage, prot))
 				goto unwind;
 		}
+
+		cond_resched();
 	}
 
 	return 0;
@@ -665,6 +688,39 @@
 	return 0;
 }
 
+/*
+ * We change our unmap behavior slightly depending on whether the IOMMU
+ * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
+ * for practically any contiguous power-of-two mapping we give it.  This means
+ * we don't need to look for contiguous chunks ourselves to make unmapping
+ * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
+ * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
+ * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
+ * hugetlbfs is in use.
+ */
+static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+{
+	struct page *pages;
+	int ret, order = get_order(PAGE_SIZE * 2);
+
+	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!pages)
+		return;
+
+	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
+			IOMMU_READ | IOMMU_WRITE | domain->prot);
+	if (!ret) {
+		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+
+		if (unmapped == PAGE_SIZE)
+			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
+		else
+			domain->fgsp = true;
+	}
+
+	__free_pages(pages, order);
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
 					 struct iommu_group *iommu_group)
 {
@@ -758,6 +814,8 @@
 		}
 	}
 
+	vfio_test_domain_fgsp(domain);
+
 	/* replay mappings on new domains */
 	ret = vfio_iommu_replay(iommu, domain);
 	if (ret)
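
The unmap path above now walks forward while the backing physical pages stay
contiguous, so a single iommu_unmap() call (and its potential hardware cache
flush) can cover the whole run when the domain lacks fine-grained superpages.
A toy stand-alone sketch of the coalescing idea, with a plain array standing
in for iommu_iova_to_phys(); all names and values are illustrative:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PG 4096ULL

/* Count how many pages starting at index i are physically contiguous. */
static size_t contiguous_run(const uint64_t *phys, size_t i, size_t n)
{
	size_t len = 1;

	while (i + len < n && phys[i + len] == phys[i] + len * PG)
		len++;
	return len;
}

int main(void)
{
	/* Three contiguous pages, a gap, then two more contiguous pages. */
	uint64_t phys[] = { 0x1000, 0x2000, 0x3000, 0x9000, 0xa000 };
	size_t n = sizeof(phys) / sizeof(phys[0]);
	size_t i = 0;

	while (i < n) {
		size_t run = contiguous_run(phys, i, n);

		printf("unmap %zu page(s) starting at 0x%llx\n",
		       run, (unsigned long long)phys[i]);
		i += run;
	}
	return 0;
}
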
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index afa06d2..2bbfc25 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -591,11 +591,6 @@
 			 * TODO: support TSO.
 			 */
 			iov_iter_advance(&msg.msg_iter, vhost_hlen);
-		} else {
-			/* It'll come from socket; we'll need to patch
-			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
-			 */
-			iov_iter_advance(&fixup, sizeof(hdr));
 		}
 		err = sock->ops->recvmsg(NULL, sock, &msg,
 					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@
 			continue;
 		}
 		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
-		if (unlikely(vhost_hlen) &&
-		    copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
-			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
-			       vq->iov->iov_base);
-			break;
+		if (unlikely(vhost_hlen)) {
+			if (copy_to_iter(&hdr, sizeof(hdr),
+					 &fixup) != sizeof(hdr)) {
+				vq_err(vq, "Unable to write vnet_hdr "
+				       "at addr %p\n", vq->iov->iov_base);
+				break;
+			}
+		} else {
+			/* Header came from socket; we'll need to patch
+			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+			 */
+			iov_iter_advance(&fixup, sizeof(hdr));
 		}
 		/* TODO: Should check and handle checksum. */
 
 		num_buffers = cpu_to_vhost16(vq, headcount);
 		if (likely(mergeable) &&
-		    copy_to_iter(&num_buffers, 2, &fixup) != 2) {
+		    copy_to_iter(&num_buffers, sizeof num_buffers,
+				 &fixup) != sizeof num_buffers) {
 			vq_err(vq, "Failed num_buffers write");
 			vhost_discard_vq_desc(vq, headcount);
 			break;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index dc78d87..8d4f3f1 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -38,7 +38,6 @@
 #include <linux/miscdevice.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
-#include <scsi/scsi_tcq.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
@@ -52,13 +51,13 @@
 
 #include "vhost.h"
 
-#define TCM_VHOST_VERSION  "v0.1"
-#define TCM_VHOST_NAMELEN 256
-#define TCM_VHOST_MAX_CDB_SIZE 32
-#define TCM_VHOST_DEFAULT_TAGS 256
-#define TCM_VHOST_PREALLOC_SGLS 2048
-#define TCM_VHOST_PREALLOC_UPAGES 2048
-#define TCM_VHOST_PREALLOC_PROT_SGLS 512
+#define VHOST_SCSI_VERSION  "v0.1"
+#define VHOST_SCSI_NAMELEN 256
+#define VHOST_SCSI_MAX_CDB_SIZE 32
+#define VHOST_SCSI_DEFAULT_TAGS 256
+#define VHOST_SCSI_PREALLOC_SGLS 2048
+#define VHOST_SCSI_PREALLOC_UPAGES 2048
+#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
 
 struct vhost_scsi_inflight {
 	/* Wait for the flush operation to finish */
@@ -67,11 +66,13 @@
 	struct kref kref;
 };
 
-struct tcm_vhost_cmd {
+struct vhost_scsi_cmd {
 	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
 	int tvc_vq_desc;
 	/* virtio-scsi initiator task attribute */
 	int tvc_task_attr;
+	/* virtio-scsi response incoming iovecs */
+	int tvc_in_iovs;
 	/* virtio-scsi initiator data direction */
 	enum dma_data_direction tvc_data_direction;
 	/* Expected data transfer length from virtio-scsi header */
@@ -81,26 +82,26 @@
 	/* The number of scatterlists associated with this cmd */
 	u32 tvc_sgl_count;
 	u32 tvc_prot_sgl_count;
-	/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
 	u32 tvc_lun;
 	/* Pointer to the SGL formatted memory from virtio-scsi */
 	struct scatterlist *tvc_sgl;
 	struct scatterlist *tvc_prot_sgl;
 	struct page **tvc_upages;
-	/* Pointer to response */
-	struct virtio_scsi_cmd_resp __user *tvc_resp;
+	/* Pointer to response header iovec */
+	struct iovec *tvc_resp_iov;
 	/* Pointer to vhost_scsi for our device */
 	struct vhost_scsi *tvc_vhost;
 	/* Pointer to vhost_virtqueue for the cmd */
 	struct vhost_virtqueue *tvc_vq;
 	/* Pointer to vhost nexus memory */
-	struct tcm_vhost_nexus *tvc_nexus;
+	struct vhost_scsi_nexus *tvc_nexus;
 	/* The TCM I/O descriptor that is accessed via container_of() */
 	struct se_cmd tvc_se_cmd;
-	/* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
 	struct work_struct work;
 	/* Copy of the incoming SCSI command descriptor block (CDB) */
-	unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 	/* Completed commands list, serviced from vhost worker thread */
@@ -109,53 +110,53 @@
 	struct vhost_scsi_inflight *inflight;
 };
 
-struct tcm_vhost_nexus {
+struct vhost_scsi_nexus {
 	/* Pointer to TCM session for I_T Nexus */
 	struct se_session *tvn_se_sess;
 };
 
-struct tcm_vhost_nacl {
+struct vhost_scsi_nacl {
 	/* Binary World Wide unique Port Name for Vhost Initiator port */
 	u64 iport_wwpn;
 	/* ASCII formatted WWPN for Sas Initiator port */
-	char iport_name[TCM_VHOST_NAMELEN];
-	/* Returned by tcm_vhost_make_nodeacl() */
+	char iport_name[VHOST_SCSI_NAMELEN];
+	/* Returned by vhost_scsi_make_nodeacl() */
 	struct se_node_acl se_node_acl;
 };
 
-struct tcm_vhost_tpg {
+struct vhost_scsi_tpg {
 	/* Vhost port target portal group tag for TCM */
 	u16 tport_tpgt;
 	/* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
 	int tv_tpg_port_count;
 	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
 	int tv_tpg_vhost_count;
-	/* list for tcm_vhost_list */
+	/* list for vhost_scsi_list */
 	struct list_head tv_tpg_list;
 	/* Used to protect access for tpg_nexus */
 	struct mutex tv_tpg_mutex;
 	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
-	struct tcm_vhost_nexus *tpg_nexus;
-	/* Pointer back to tcm_vhost_tport */
-	struct tcm_vhost_tport *tport;
-	/* Returned by tcm_vhost_make_tpg() */
+	struct vhost_scsi_nexus *tpg_nexus;
+	/* Pointer back to vhost_scsi_tport */
+	struct vhost_scsi_tport *tport;
+	/* Returned by vhost_scsi_make_tpg() */
 	struct se_portal_group se_tpg;
 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
 	struct vhost_scsi *vhost_scsi;
 };
 
-struct tcm_vhost_tport {
+struct vhost_scsi_tport {
 	/* SCSI protocol the tport is providing */
 	u8 tport_proto_id;
 	/* Binary World Wide unique Port Name for Vhost Target port */
 	u64 tport_wwpn;
 	/* ASCII formatted WWPN for Vhost Target port */
-	char tport_name[TCM_VHOST_NAMELEN];
-	/* Returned by tcm_vhost_make_tport() */
+	char tport_name[VHOST_SCSI_NAMELEN];
+	/* Returned by vhost_scsi_make_tport() */
 	struct se_wwn tport_wwn;
 };
 
-struct tcm_vhost_evt {
+struct vhost_scsi_evt {
 	/* event to be sent to guest */
 	struct virtio_scsi_event event;
 	/* event list, serviced from vhost worker thread */
@@ -171,7 +172,9 @@
 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 enum {
 	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
-					       (1ULL << VIRTIO_SCSI_F_T10_PI)
+					       (1ULL << VIRTIO_SCSI_F_T10_PI) |
+					       (1ULL << VIRTIO_F_ANY_LAYOUT) |
+					       (1ULL << VIRTIO_F_VERSION_1)
 };
 
 #define VHOST_SCSI_MAX_TARGET	256
@@ -195,7 +198,7 @@
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
-	struct tcm_vhost_tpg **vs_tpg;
+	struct vhost_scsi_tpg **vs_tpg;
 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 
 	struct vhost_dev dev;
@@ -212,21 +215,21 @@
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
+static struct target_fabric_configfs *vhost_scsi_fabric_configfs;
 
-static struct workqueue_struct *tcm_vhost_workqueue;
+static struct workqueue_struct *vhost_scsi_workqueue;
 
-/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
-static DEFINE_MUTEX(tcm_vhost_mutex);
-static LIST_HEAD(tcm_vhost_list);
+/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
+static DEFINE_MUTEX(vhost_scsi_mutex);
+static LIST_HEAD(vhost_scsi_list);
 
-static int iov_num_pages(struct iovec *iov)
+static int iov_num_pages(void __user *iov_base, size_t iov_len)
 {
-	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
-	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
+	return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
+	       ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
-static void tcm_vhost_done_inflight(struct kref *kref)
+static void vhost_scsi_done_inflight(struct kref *kref)
 {
 	struct vhost_scsi_inflight *inflight;
 
@@ -234,7 +237,7 @@
 	complete(&inflight->comp);
 }
 
-static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
+static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
 				    struct vhost_scsi_inflight *old_inflight[])
 {
 	struct vhost_scsi_inflight *new_inflight;
@@ -262,7 +265,7 @@
 }
 
 static struct vhost_scsi_inflight *
-tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
+vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
 {
 	struct vhost_scsi_inflight *inflight;
 	struct vhost_scsi_virtqueue *svq;
@@ -274,31 +277,31 @@
 	return inflight;
 }
 
-static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
+static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
 {
-	kref_put(&inflight->kref, tcm_vhost_done_inflight);
+	kref_put(&inflight->kref, vhost_scsi_done_inflight);
 }
 
-static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
 {
 	return 1;
 }
 
-static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
 {
 	return 0;
 }
 
-static char *tcm_vhost_get_fabric_name(void)
+static char *vhost_scsi_get_fabric_name(void)
 {
 	return "vhost";
 }
 
-static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport = tpg->tport;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport = tpg->tport;
 
 	switch (tport->tport_proto_id) {
 	case SCSI_PROTOCOL_SAS:
@@ -316,37 +319,37 @@
 	return sas_get_fabric_proto_ident(se_tpg);
 }
 
-static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
+static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport = tpg->tport;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport = tpg->tport;
 
 	return &tport->tport_name[0];
 }
 
-static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
+static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
 	return tpg->tport_tpgt;
 }
 
-static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
+static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
 {
 	return 1;
 }
 
 static u32
-tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
+vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
 			      struct se_node_acl *se_nacl,
 			      struct t10_pr_registration *pr_reg,
 			      int *format_code,
 			      unsigned char *buf)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport = tpg->tport;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport = tpg->tport;
 
 	switch (tport->tport_proto_id) {
 	case SCSI_PROTOCOL_SAS:
@@ -369,14 +372,14 @@
 }
 
 static u32
-tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
 				  struct se_node_acl *se_nacl,
 				  struct t10_pr_registration *pr_reg,
 				  int *format_code)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport = tpg->tport;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport = tpg->tport;
 
 	switch (tport->tport_proto_id) {
 	case SCSI_PROTOCOL_SAS:
@@ -399,14 +402,14 @@
 }
 
 static char *
-tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
 				    const char *buf,
 				    u32 *out_tid_len,
 				    char **port_nexus_ptr)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport = tpg->tport;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport = tpg->tport;
 
 	switch (tport->tport_proto_id) {
 	case SCSI_PROTOCOL_SAS:
@@ -429,13 +432,13 @@
 }
 
 static struct se_node_acl *
-tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
+vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
-	struct tcm_vhost_nacl *nacl;
+	struct vhost_scsi_nacl *nacl;
 
-	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
+	nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
 	if (!nacl) {
-		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
+		pr_err("Unable to allocate struct vhost_scsi_nacl\n");
 		return NULL;
 	}
 
@@ -443,24 +446,24 @@
 }
 
 static void
-tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
+vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
 			     struct se_node_acl *se_nacl)
 {
-	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
-			struct tcm_vhost_nacl, se_node_acl);
+	struct vhost_scsi_nacl *nacl = container_of(se_nacl,
+			struct vhost_scsi_nacl, se_node_acl);
 	kfree(nacl);
 }
 
-static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
+static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
 	return 1;
 }
 
-static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 {
-	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
-				struct tcm_vhost_cmd, tvc_se_cmd);
-	struct se_session *se_sess = se_cmd->se_sess;
+	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
+				struct vhost_scsi_cmd, tvc_se_cmd);
+	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
 	int i;
 
 	if (tv_cmd->tvc_sgl_count) {
@@ -472,53 +475,53 @@
 			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
 	}
 
-	tcm_vhost_put_inflight(tv_cmd->inflight);
+	vhost_scsi_put_inflight(tv_cmd->inflight);
 	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
-static int tcm_vhost_shutdown_session(struct se_session *se_sess)
+static int vhost_scsi_shutdown_session(struct se_session *se_sess)
 {
 	return 0;
 }
 
-static void tcm_vhost_close_session(struct se_session *se_sess)
+static void vhost_scsi_close_session(struct se_session *se_sess)
 {
 	return;
 }
 
-static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
+static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 {
 	return 0;
 }
 
-static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 {
 	/* Go ahead and process the write immediately */
 	target_execute_cmd(se_cmd);
 	return 0;
 }
 
-static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
 {
 	return 0;
 }
 
-static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
+static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
 {
 	return;
 }
 
-static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
+static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
 {
 	return 0;
 }
 
-static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
+static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 {
 	return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
 {
 	struct vhost_scsi *vs = cmd->tvc_vhost;
 
@@ -527,44 +530,44 @@
 	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }
 
-static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 {
-	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
-				struct tcm_vhost_cmd, tvc_se_cmd);
+	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+				struct vhost_scsi_cmd, tvc_se_cmd);
 	vhost_scsi_complete_cmd(cmd);
 	return 0;
 }
 
-static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 {
-	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
-				struct tcm_vhost_cmd, tvc_se_cmd);
+	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+				struct vhost_scsi_cmd, tvc_se_cmd);
 	vhost_scsi_complete_cmd(cmd);
 	return 0;
 }
 
-static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 {
 	return;
 }
 
-static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
+static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
 {
 	return;
 }
 
-static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 {
 	vs->vs_events_nr--;
 	kfree(evt);
 }
 
-static struct tcm_vhost_evt *
-tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+static struct vhost_scsi_evt *
+vhost_scsi_allocate_evt(struct vhost_scsi *vs,
 		       u32 event, u32 reason)
 {
 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
-	struct tcm_vhost_evt *evt;
+	struct vhost_scsi_evt *evt;
 
 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
 		vs->vs_events_missed = true;
@@ -573,7 +576,7 @@
 
 	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
 	if (!evt) {
-		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
+		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
 		vs->vs_events_missed = true;
 		return NULL;
 	}
@@ -585,7 +588,7 @@
 	return evt;
 }
 
-static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
 {
 	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 
@@ -600,7 +603,7 @@
 }
 
 static void
-tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 {
 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct virtio_scsi_event *event = &evt->event;
@@ -646,24 +649,24 @@
 	if (!ret)
 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
 	else
-		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
+		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
 }
 
-static void tcm_vhost_evt_work(struct vhost_work *work)
+static void vhost_scsi_evt_work(struct vhost_work *work)
 {
 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 					vs_event_work);
 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
-	struct tcm_vhost_evt *evt;
+	struct vhost_scsi_evt *evt;
 	struct llist_node *llnode;
 
 	mutex_lock(&vq->mutex);
 	llnode = llist_del_all(&vs->vs_event_list);
 	while (llnode) {
-		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
+		evt = llist_entry(llnode, struct vhost_scsi_evt, list);
 		llnode = llist_next(llnode);
-		tcm_vhost_do_evt_work(vs, evt);
-		tcm_vhost_free_evt(vs, evt);
+		vhost_scsi_do_evt_work(vs, evt);
+		vhost_scsi_free_evt(vs, evt);
 	}
 	mutex_unlock(&vq->mutex);
 }
@@ -679,15 +682,16 @@
 					vs_completion_work);
 	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
 	struct virtio_scsi_cmd_resp v_rsp;
-	struct tcm_vhost_cmd *cmd;
+	struct vhost_scsi_cmd *cmd;
 	struct llist_node *llnode;
 	struct se_cmd *se_cmd;
+	struct iov_iter iov_iter;
 	int ret, vq;
 
 	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
 	llnode = llist_del_all(&vs->vs_completion_list);
 	while (llnode) {
-		cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+		cmd = llist_entry(llnode, struct vhost_scsi_cmd,
 				     tvc_completion_list);
 		llnode = llist_next(llnode);
 		se_cmd = &cmd->tvc_se_cmd;
@@ -703,8 +707,11 @@
 						 se_cmd->scsi_sense_length);
 		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
 		       se_cmd->scsi_sense_length);
-		ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
-		if (likely(ret == 0)) {
+
+		iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+			      cmd->tvc_in_iovs, sizeof(v_rsp));
+		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
+		if (likely(ret == sizeof(v_rsp))) {
 			struct vhost_scsi_virtqueue *q;
 			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
 			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
@@ -722,13 +729,13 @@
 		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 }
 
-static struct tcm_vhost_cmd *
-vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
+static struct vhost_scsi_cmd *
+vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
 		   u32 exp_data_len, int data_direction)
 {
-	struct tcm_vhost_cmd *cmd;
-	struct tcm_vhost_nexus *tv_nexus;
+	struct vhost_scsi_cmd *cmd;
+	struct vhost_scsi_nexus *tv_nexus;
 	struct se_session *se_sess;
 	struct scatterlist *sg, *prot_sg;
 	struct page **pages;
@@ -736,22 +743,22 @@
 
 	tv_nexus = tpg->tpg_nexus;
 	if (!tv_nexus) {
-		pr_err("Unable to locate active struct tcm_vhost_nexus\n");
+		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
 		return ERR_PTR(-EIO);
 	}
 	se_sess = tv_nexus->tvn_se_sess;
 
 	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 	if (tag < 0) {
-		pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
+		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
-	cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
+	cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
 	sg = cmd->tvc_sgl;
 	prot_sg = cmd->tvc_prot_sgl;
 	pages = cmd->tvc_upages;
-	memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
+	memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
 
 	cmd->tvc_sgl = sg;
 	cmd->tvc_prot_sgl = prot_sg;
@@ -763,9 +770,9 @@
 	cmd->tvc_exp_data_len = exp_data_len;
 	cmd->tvc_data_direction = data_direction;
 	cmd->tvc_nexus = tv_nexus;
-	cmd->inflight = tcm_vhost_get_inflight(vq);
+	cmd->inflight = vhost_scsi_get_inflight(vq);
 
-	memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);
+	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
 
 	return cmd;
 }
@@ -776,29 +783,22 @@
  * Returns the number of scatterlist entries used or -errno on error.
  */
 static int
-vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
+		      void __user *ptr,
+		      size_t len,
 		      struct scatterlist *sgl,
-		      unsigned int sgl_count,
-		      struct iovec *iov,
-		      struct page **pages,
 		      bool write)
 {
-	unsigned int npages = 0, pages_nr, offset, nbytes;
+	unsigned int npages = 0, offset, nbytes;
+	unsigned int pages_nr = iov_num_pages(ptr, len);
 	struct scatterlist *sg = sgl;
-	void __user *ptr = iov->iov_base;
-	size_t len = iov->iov_len;
+	struct page **pages = cmd->tvc_upages;
 	int ret, i;
 
-	pages_nr = iov_num_pages(iov);
-	if (pages_nr > sgl_count) {
+	if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
 		pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-		       " sgl_count: %u\n", pages_nr, sgl_count);
-		return -ENOBUFS;
-	}
-	if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
-		pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-		       " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
-			pages_nr, TCM_VHOST_PREALLOC_UPAGES);
+		       " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
+			pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
 		return -ENOBUFS;
 	}
 
@@ -829,84 +829,94 @@
 }
 
 static int
-vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
-			  struct iovec *iov,
-			  int niov,
-			  bool write)
+vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 {
-	struct scatterlist *sg = cmd->tvc_sgl;
-	unsigned int sgl_count = 0;
-	int ret, i;
+	int sgl_count = 0;
 
-	for (i = 0; i < niov; i++)
-		sgl_count += iov_num_pages(&iov[i]);
-
-	if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
-		pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
-			" preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
-			sgl_count, TCM_VHOST_PREALLOC_SGLS);
-		return -ENOBUFS;
+	if (!iter || !iter->iov) {
+		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
+		       " present\n", __func__, bytes);
+		return -EINVAL;
 	}
 
-	pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
-	sg_init_table(sg, sgl_count);
-	cmd->tvc_sgl_count = sgl_count;
+	sgl_count = iov_iter_npages(iter, 0xffff);
+	if (sgl_count > max_sgls) {
+		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
+		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
+		return -EINVAL;
+	}
+	return sgl_count;
+}
 
-	pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
+static int
+vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
+		      struct iov_iter *iter,
+		      struct scatterlist *sg, int sg_count)
+{
+	size_t off = iter->iov_offset;
+	int i, ret;
 
-	for (i = 0; i < niov; i++) {
-		ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
-					    cmd->tvc_upages, write);
+	for (i = 0; i < iter->nr_segs; i++) {
+		void __user *base = iter->iov[i].iov_base + off;
+		size_t len = iter->iov[i].iov_len - off;
+
+		ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
 		if (ret < 0) {
-			for (i = 0; i < cmd->tvc_sgl_count; i++)
-				put_page(sg_page(&cmd->tvc_sgl[i]));
-
-			cmd->tvc_sgl_count = 0;
+			for (i = 0; i < sg_count; i++) {
+				struct page *page = sg_page(&sg[i]);
+				if (page)
+					put_page(page);
+			}
 			return ret;
 		}
 		sg += ret;
-		sgl_count -= ret;
+		off = 0;
 	}
 	return 0;
 }
 
 static int
-vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
-			   struct iovec *iov,
-			   int niov,
-			   bool write)
+vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
+		 size_t prot_bytes, struct iov_iter *prot_iter,
+		 size_t data_bytes, struct iov_iter *data_iter)
 {
-	struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
-	unsigned int prot_sgl_count = 0;
-	int ret, i;
+	int sgl_count, ret;
+	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
 
-	for (i = 0; i < niov; i++)
-		prot_sgl_count += iov_num_pages(&iov[i]);
+	if (prot_bytes) {
+		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
+						 VHOST_SCSI_PREALLOC_PROT_SGLS);
+		if (sgl_count < 0)
+			return sgl_count;
 
-	if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
-		pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
-			" preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
-			prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
-		return -ENOBUFS;
-	}
+		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
+		cmd->tvc_prot_sgl_count = sgl_count;
+		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
+			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
 
-	pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
-		 prot_sg, prot_sgl_count);
-	sg_init_table(prot_sg, prot_sgl_count);
-	cmd->tvc_prot_sgl_count = prot_sgl_count;
-
-	for (i = 0; i < niov; i++) {
-		ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
-					    cmd->tvc_upages, write);
+		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
+					    cmd->tvc_prot_sgl,
+					    cmd->tvc_prot_sgl_count);
 		if (ret < 0) {
-			for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
-				put_page(sg_page(&cmd->tvc_prot_sgl[i]));
-
 			cmd->tvc_prot_sgl_count = 0;
 			return ret;
 		}
-		prot_sg += ret;
-		prot_sgl_count -= ret;
+	}
+	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
+					 VHOST_SCSI_PREALLOC_SGLS);
+	if (sgl_count < 0)
+		return sgl_count;
+
+	sg_init_table(cmd->tvc_sgl, sgl_count);
+	cmd->tvc_sgl_count = sgl_count;
+	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
+		  cmd->tvc_sgl, cmd->tvc_sgl_count);
+
+	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
+				    cmd->tvc_sgl, cmd->tvc_sgl_count);
+	if (ret < 0) {
+		cmd->tvc_sgl_count = 0;
+		return ret;
 	}
 	return 0;
 }
@@ -928,11 +938,11 @@
 	return TCM_SIMPLE_TAG;
 }
 
-static void tcm_vhost_submission_work(struct work_struct *work)
+static void vhost_scsi_submission_work(struct work_struct *work)
 {
-	struct tcm_vhost_cmd *cmd =
-		container_of(work, struct tcm_vhost_cmd, work);
-	struct tcm_vhost_nexus *tv_nexus;
+	struct vhost_scsi_cmd *cmd =
+		container_of(work, struct vhost_scsi_cmd, work);
+	struct vhost_scsi_nexus *tv_nexus;
 	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
 	int rc;
@@ -986,19 +996,20 @@
 static void
 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
-	struct tcm_vhost_tpg **vs_tpg;
+	struct vhost_scsi_tpg **vs_tpg, *tpg;
 	struct virtio_scsi_cmd_req v_req;
 	struct virtio_scsi_cmd_req_pi v_req_pi;
-	struct tcm_vhost_tpg *tpg;
-	struct tcm_vhost_cmd *cmd;
+	struct vhost_scsi_cmd *cmd;
+	struct iov_iter out_iter, in_iter, prot_iter, data_iter;
 	u64 tag;
-	u32 exp_data_len, data_first, data_num, data_direction, prot_first;
-	unsigned out, in, i;
-	int head, ret, data_niov, prot_niov, prot_bytes;
-	size_t req_size;
+	u32 exp_data_len, data_direction;
+	unsigned out, in;
+	int head, ret, prot_bytes;
+	size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+	size_t out_size, in_size;
 	u16 lun;
 	u8 *target, *lunp, task_attr;
-	bool hdr_pi;
+	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
 	void *req, *cdb;
 
 	mutex_lock(&vq->mutex);
@@ -1014,10 +1025,10 @@
 
 	for (;;) {
 		head = vhost_get_vq_desc(vq, vq->iov,
-					ARRAY_SIZE(vq->iov), &out, &in,
-					NULL, NULL);
+					 ARRAY_SIZE(vq->iov), &out, &in,
+					 NULL, NULL);
 		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
-					head, out, in);
+			 head, out, in);
 		/* On error, stop handling until the next kick. */
 		if (unlikely(head < 0))
 			break;
@@ -1029,113 +1040,134 @@
 			}
 			break;
 		}
-
-		/* FIXME: BIDI operation */
-		if (out == 1 && in == 1) {
-			data_direction = DMA_NONE;
-			data_first = 0;
-			data_num = 0;
-		} else if (out == 1 && in > 1) {
-			data_direction = DMA_FROM_DEVICE;
-			data_first = out + 1;
-			data_num = in - 1;
-		} else if (out > 1 && in == 1) {
-			data_direction = DMA_TO_DEVICE;
-			data_first = 1;
-			data_num = out - 1;
-		} else {
-			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
-					out, in);
-			break;
-		}
-
 		/*
-		 * Check for a sane resp buffer so we can report errors to
-		 * the guest.
+		 * Check for a sane response buffer so we can report early
+		 * errors back to the guest.
 		 */
-		if (unlikely(vq->iov[out].iov_len !=
-					sizeof(struct virtio_scsi_cmd_resp))) {
-			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
-				" bytes\n", vq->iov[out].iov_len);
+		if (unlikely(vq->iov[out].iov_len < rsp_size)) {
+			vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
+				" size, got %zu bytes\n", vq->iov[out].iov_len);
 			break;
 		}
-
-		if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
+		/*
+		 * Setup pointers and values based upon different virtio-scsi
+		 * request header if T10_PI is enabled in KVM guest.
+		 */
+		if (t10_pi) {
 			req = &v_req_pi;
+			req_size = sizeof(v_req_pi);
 			lunp = &v_req_pi.lun[0];
 			target = &v_req_pi.lun[1];
-			req_size = sizeof(v_req_pi);
-			hdr_pi = true;
 		} else {
 			req = &v_req;
+			req_size = sizeof(v_req);
 			lunp = &v_req.lun[0];
 			target = &v_req.lun[1];
-			req_size = sizeof(v_req);
-			hdr_pi = false;
 		}
+		/*
+		 * FIXME: Not correct for BIDI operation
+		 */
+		out_size = iov_length(vq->iov, out);
+		in_size = iov_length(&vq->iov[out], in);
 
-		if (unlikely(vq->iov[0].iov_len < req_size)) {
-			pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
-			       req_size, vq->iov[0].iov_len);
-			break;
-		}
-		ret = copy_from_user(req, vq->iov[0].iov_base, req_size);
-		if (unlikely(ret)) {
-			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
-			break;
-		}
+		/*
+		 * Copy over the virtio-scsi request header, which for an
+		 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+		 * single iovec may contain both the header + outgoing
+		 * WRITE payloads.
+		 *
+		 * copy_from_iter() will advance out_iter, so that it will
+		 * point at the start of the outgoing WRITE payload, if
+		 * DMA_TO_DEVICE is set.
+		 */
+		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
 
+		ret = copy_from_iter(req, req_size, &out_iter);
+		if (unlikely(ret != req_size)) {
+			vq_err(vq, "Faulted on copy_from_iter\n");
+			vhost_scsi_send_bad_target(vs, vq, head, out);
+			continue;
+		}
 		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
 		if (unlikely(*lunp != 1)) {
+			vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
 			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
 
 		tpg = ACCESS_ONCE(vs_tpg[*target]);
-
-		/* Target does not exist, fail the request */
 		if (unlikely(!tpg)) {
+			/* Target does not exist, fail the request */
 			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
-
-		data_niov = data_num;
-		prot_niov = prot_first = prot_bytes = 0;
 		/*
-		 * Determine if any protection information iovecs are preceeding
-		 * the actual data payload, and adjust data_first + data_niov
-		 * values accordingly for vhost_scsi_map_iov_to_sgl() below.
+		 * Determine data_direction by calculating the total outgoing
+		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
+		 * response headers respectively.
 		 *
-		 * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
+		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
+		 * to the right place.
+		 *
+		 * For DMA_FROM_DEVICE, the iovec will be just past the end
+		 * of the virtio-scsi response header in either the same
+		 * or immediately following iovec.
+		 *
+		 * Any associated T10_PI bytes for the outgoing / incoming
+		 * payloads are included in calculation of exp_data_len here.
 		 */
-		if (hdr_pi) {
+		prot_bytes = 0;
+
+		if (out_size > req_size) {
+			data_direction = DMA_TO_DEVICE;
+			exp_data_len = out_size - req_size;
+			data_iter = out_iter;
+		} else if (in_size > rsp_size) {
+			data_direction = DMA_FROM_DEVICE;
+			exp_data_len = in_size - rsp_size;
+
+			iov_iter_init(&in_iter, READ, &vq->iov[out], in,
+				      rsp_size + exp_data_len);
+			iov_iter_advance(&in_iter, rsp_size);
+			data_iter = in_iter;
+		} else {
+			data_direction = DMA_NONE;
+			exp_data_len = 0;
+		}
+		/*
+		 * If T10_PI header + payload is present, setup prot_iter values
+		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
+		 * host scatterlists via get_user_pages_fast().
+		 */
+		if (t10_pi) {
 			if (v_req_pi.pi_bytesout) {
 				if (data_direction != DMA_TO_DEVICE) {
-					vq_err(vq, "Received non zero do_pi_niov"
-						", but wrong data_direction\n");
-					goto err_cmd;
+					vq_err(vq, "Received non zero pi_bytesout,"
+						" but wrong data_direction\n");
+					vhost_scsi_send_bad_target(vs, vq, head, out);
+					continue;
 				}
 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
 			} else if (v_req_pi.pi_bytesin) {
 				if (data_direction != DMA_FROM_DEVICE) {
-					vq_err(vq, "Received non zero di_pi_niov"
-						", but wrong data_direction\n");
-					goto err_cmd;
+					vq_err(vq, "Received non zero pi_bytesin,"
+						" but wrong data_direction\n");
+					vhost_scsi_send_bad_target(vs, vq, head, out);
+					continue;
 				}
 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
 			}
+			/*
+			 * Set prot_iter to data_iter, and advance past any
+			 * preceding prot_bytes that may be present.
+			 *
+			 * Also fix up the exp_data_len to reflect only the
+			 * actual data payload length.
+			 */
 			if (prot_bytes) {
-				int tmp = 0;
-
-				for (i = 0; i < data_num; i++) {
-					tmp += vq->iov[data_first + i].iov_len;
-					prot_niov++;
-					if (tmp >= prot_bytes)
-						break;
-				}
-				prot_first = data_first;
-				data_first += prot_niov;
-				data_niov = data_num - prot_niov;
+				exp_data_len -= prot_bytes;
+				prot_iter = data_iter;
+				iov_iter_advance(&data_iter, prot_bytes);
 			}
 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
 			task_attr = v_req_pi.task_attr;
@@ -1147,83 +1179,65 @@
 			cdb = &v_req.cdb[0];
 			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 		}
-		exp_data_len = 0;
-		for (i = 0; i < data_niov; i++)
-			exp_data_len += vq->iov[data_first + i].iov_len;
 		/*
-		 * Check that the recieved CDB size does not exceeded our
-		 * hardcoded max for vhost-scsi
+		 * Check that the received CDB size does not exceed our
+		 * hardcoded max for vhost-scsi, then get a pre-allocated
+		 * cmd descriptor for the new virtio-scsi tag.
 		 *
 		 * TODO what if cdb was too small for varlen cdb header?
 		 */
-		if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
+		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
-				scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
-			goto err_cmd;
+				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
+			vhost_scsi_send_bad_target(vs, vq, head, out);
+			continue;
 		}
-
 		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
 					 exp_data_len + prot_bytes,
 					 data_direction);
 		if (IS_ERR(cmd)) {
 			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
-					PTR_ERR(cmd));
-			goto err_cmd;
+			       PTR_ERR(cmd));
+			vhost_scsi_send_bad_target(vs, vq, head, out);
+			continue;
 		}
-
-		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
-			": %d\n", cmd, exp_data_len, data_direction);
-
 		cmd->tvc_vhost = vs;
 		cmd->tvc_vq = vq;
-		cmd->tvc_resp = vq->iov[out].iov_base;
+		cmd->tvc_resp_iov = &vq->iov[out];
+		cmd->tvc_in_iovs = in;
 
 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
-			cmd->tvc_cdb[0], cmd->tvc_lun);
+			 cmd->tvc_cdb[0], cmd->tvc_lun);
+		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
+			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
 
-		if (prot_niov) {
-			ret = vhost_scsi_map_iov_to_prot(cmd,
-					&vq->iov[prot_first], prot_niov,
-					data_direction == DMA_FROM_DEVICE);
-			if (unlikely(ret)) {
-				vq_err(vq, "Failed to map iov to"
-					" prot_sgl\n");
-				goto err_free;
-			}
-		}
 		if (data_direction != DMA_NONE) {
-			ret = vhost_scsi_map_iov_to_sgl(cmd,
-					&vq->iov[data_first], data_niov,
-					data_direction == DMA_FROM_DEVICE);
+			ret = vhost_scsi_mapal(cmd,
+					       prot_bytes, &prot_iter,
+					       exp_data_len, &data_iter);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
-				goto err_free;
+				vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
+				vhost_scsi_send_bad_target(vs, vq, head, out);
+				continue;
 			}
 		}
 		/*
 		 * Save the descriptor from vhost_get_vq_desc() to be used to
 		 * complete the virtio-scsi request in TCM callback context via
-		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
+		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
 		 */
 		cmd->tvc_vq_desc = head;
 		/*
-		 * Dispatch tv_cmd descriptor for cmwq execution in process
-		 * context provided by tcm_vhost_workqueue.  This also ensures
-		 * tv_cmd is executed on the same kworker CPU as this vhost
-		 * thread to gain positive L2 cache locality effects..
+		 * Dispatch cmd descriptor for cmwq execution in process
+		 * context provided by vhost_scsi_workqueue.  This also ensures
+		 * cmd is executed on the same kworker CPU as this vhost
+		 * thread to gain positive L2 cache locality effects.
 		 */
-		INIT_WORK(&cmd->work, tcm_vhost_submission_work);
-		queue_work(tcm_vhost_workqueue, &cmd->work);
+		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
+		queue_work(vhost_scsi_workqueue, &cmd->work);
 	}
-
-	mutex_unlock(&vq->mutex);
-	return;
-
-err_free:
-	vhost_scsi_free_cmd(cmd);
-err_cmd:
-	vhost_scsi_send_bad_target(vs, vq, head, out);
 out:
 	mutex_unlock(&vq->mutex);
 }
@@ -1234,15 +1248,15 @@
 }
 
 static void
-tcm_vhost_send_evt(struct vhost_scsi *vs,
-		   struct tcm_vhost_tpg *tpg,
+vhost_scsi_send_evt(struct vhost_scsi *vs,
+		   struct vhost_scsi_tpg *tpg,
 		   struct se_lun *lun,
 		   u32 event,
 		   u32 reason)
 {
-	struct tcm_vhost_evt *evt;
+	struct vhost_scsi_evt *evt;
 
-	evt = tcm_vhost_allocate_evt(vs, event, reason);
+	evt = vhost_scsi_allocate_evt(vs, event, reason);
 	if (!evt)
 		return;
 
@@ -1253,7 +1267,7 @@
 		 * lun[4-7] need to be zero according to virtio-scsi spec.
 		 */
 		evt->event.lun[0] = 0x01;
-		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
+		evt->event.lun[1] = tpg->tport_tpgt;
 		if (lun->unpacked_lun >= 256)
 			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
 		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
@@ -1274,7 +1288,7 @@
 		goto out;
 
 	if (vs->vs_events_missed)
-		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
 out:
 	mutex_unlock(&vq->mutex);
 }
@@ -1300,7 +1314,7 @@
 	int i;
 
 	/* Init new inflight and remember the old inflight */
-	tcm_vhost_init_inflight(vs, old_inflight);
+	vhost_scsi_init_inflight(vs, old_inflight);
 
 	/*
 	 * The inflight->kref was initialized to 1. We decrement it here to
@@ -1308,7 +1322,7 @@
 	 * when all the reqs are finished.
 	 */
 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
-		kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
+		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
 
 	/* Flush both the vhost poll and vhost work */
 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
@@ -1323,24 +1337,24 @@
 
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
- * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ * vhost_scsi_tpg with an active struct vhost_scsi_nexus
  *
  *  The lock nesting rule is:
- *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
+ *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int
 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 			struct vhost_scsi_target *t)
 {
 	struct se_portal_group *se_tpg;
-	struct tcm_vhost_tport *tv_tport;
-	struct tcm_vhost_tpg *tpg;
-	struct tcm_vhost_tpg **vs_tpg;
+	struct vhost_scsi_tport *tv_tport;
+	struct vhost_scsi_tpg *tpg;
+	struct vhost_scsi_tpg **vs_tpg;
 	struct vhost_virtqueue *vq;
 	int index, ret, i, len;
 	bool match = false;
 
-	mutex_lock(&tcm_vhost_mutex);
+	mutex_lock(&vhost_scsi_mutex);
 	mutex_lock(&vs->dev.mutex);
 
 	/* Verify that ring has been setup correctly. */
@@ -1361,7 +1375,7 @@
 	if (vs->vs_tpg)
 		memcpy(vs_tpg, vs->vs_tpg, len);
 
-	list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
+	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
 		mutex_lock(&tpg->tv_tpg_mutex);
 		if (!tpg->tpg_nexus) {
 			mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1429,7 +1443,7 @@
 
 out:
 	mutex_unlock(&vs->dev.mutex);
-	mutex_unlock(&tcm_vhost_mutex);
+	mutex_unlock(&vhost_scsi_mutex);
 	return ret;
 }
 
@@ -1438,14 +1452,14 @@
 			  struct vhost_scsi_target *t)
 {
 	struct se_portal_group *se_tpg;
-	struct tcm_vhost_tport *tv_tport;
-	struct tcm_vhost_tpg *tpg;
+	struct vhost_scsi_tport *tv_tport;
+	struct vhost_scsi_tpg *tpg;
 	struct vhost_virtqueue *vq;
 	bool match = false;
 	int index, ret, i;
 	u8 target;
 
-	mutex_lock(&tcm_vhost_mutex);
+	mutex_lock(&vhost_scsi_mutex);
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -1511,14 +1525,14 @@
 	vs->vs_tpg = NULL;
 	WARN_ON(vs->vs_events_nr);
 	mutex_unlock(&vs->dev.mutex);
-	mutex_unlock(&tcm_vhost_mutex);
+	mutex_unlock(&vhost_scsi_mutex);
 	return 0;
 
 err_tpg:
 	mutex_unlock(&tpg->tv_tpg_mutex);
 err_dev:
 	mutex_unlock(&vs->dev.mutex);
-	mutex_unlock(&tcm_vhost_mutex);
+	mutex_unlock(&vhost_scsi_mutex);
 	return ret;
 }
 
@@ -1565,7 +1579,7 @@
 		goto err_vqs;
 
 	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
-	vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
+	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
 
 	vs->vs_events_nr = 0;
 	vs->vs_events_missed = false;
@@ -1580,7 +1594,7 @@
 	}
 	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
 
-	tcm_vhost_init_inflight(vs, NULL);
+	vhost_scsi_init_inflight(vs, NULL);
 
 	f->private_data = vs;
 	return 0;
@@ -1712,7 +1726,7 @@
 	return misc_deregister(&vhost_scsi_misc);
 }
 
-static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
+static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
 {
 	switch (tport->tport_proto_id) {
 	case SCSI_PROTOCOL_SAS:
@@ -1729,7 +1743,7 @@
 }
 
 static void
-tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
+vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
 		  struct se_lun *lun, bool plug)
 {
 
@@ -1750,71 +1764,71 @@
 	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	mutex_lock(&vq->mutex);
 	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
-		tcm_vhost_send_evt(vs, tpg, lun,
+		vhost_scsi_send_evt(vs, tpg, lun,
 				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
 	mutex_unlock(&vq->mutex);
 	mutex_unlock(&vs->dev.mutex);
 }
 
-static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
 {
-	tcm_vhost_do_plug(tpg, lun, true);
+	vhost_scsi_do_plug(tpg, lun, true);
 }
 
-static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
 {
-	tcm_vhost_do_plug(tpg, lun, false);
+	vhost_scsi_do_plug(tpg, lun, false);
 }
 
-static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
+static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
 			       struct se_lun *lun)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
 
-	mutex_lock(&tcm_vhost_mutex);
+	mutex_lock(&vhost_scsi_mutex);
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tpg->tv_tpg_port_count++;
 	mutex_unlock(&tpg->tv_tpg_mutex);
 
-	tcm_vhost_hotplug(tpg, lun);
+	vhost_scsi_hotplug(tpg, lun);
 
-	mutex_unlock(&tcm_vhost_mutex);
+	mutex_unlock(&vhost_scsi_mutex);
 
 	return 0;
 }
 
-static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
+static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
 				  struct se_lun *lun)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
 
-	mutex_lock(&tcm_vhost_mutex);
+	mutex_lock(&vhost_scsi_mutex);
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tpg->tv_tpg_port_count--;
 	mutex_unlock(&tpg->tv_tpg_mutex);
 
-	tcm_vhost_hotunplug(tpg, lun);
+	vhost_scsi_hotunplug(tpg, lun);
 
-	mutex_unlock(&tcm_vhost_mutex);
+	mutex_unlock(&vhost_scsi_mutex);
 }
 
 static struct se_node_acl *
-tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
+vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
 		       struct config_group *group,
 		       const char *name)
 {
 	struct se_node_acl *se_nacl, *se_nacl_new;
-	struct tcm_vhost_nacl *nacl;
+	struct vhost_scsi_nacl *nacl;
 	u64 wwpn = 0;
 	u32 nexus_depth;
 
-	/* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+	/* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
 		return ERR_PTR(-EINVAL); */
-	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
+	se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
 	if (!se_nacl_new)
 		return ERR_PTR(-ENOMEM);
 
@@ -1826,37 +1840,37 @@
 	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
 				name, nexus_depth);
 	if (IS_ERR(se_nacl)) {
-		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
+		vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
 		return se_nacl;
 	}
 	/*
-	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
+	 * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
 	 */
-	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
+	nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
 	nacl->iport_wwpn = wwpn;
 
 	return se_nacl;
 }
 
-static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
+static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
 {
-	struct tcm_vhost_nacl *nacl = container_of(se_acl,
-				struct tcm_vhost_nacl, se_node_acl);
+	struct vhost_scsi_nacl *nacl = container_of(se_acl,
+				struct vhost_scsi_nacl, se_node_acl);
 	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
 	kfree(nacl);
 }
 
-static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
+static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
 				       struct se_session *se_sess)
 {
-	struct tcm_vhost_cmd *tv_cmd;
+	struct vhost_scsi_cmd *tv_cmd;
 	unsigned int i;
 
 	if (!se_sess->sess_cmd_map)
 		return;
 
-	for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
-		tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
+	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
+		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
 
 		kfree(tv_cmd->tvc_sgl);
 		kfree(tv_cmd->tvc_prot_sgl);
@@ -1864,13 +1878,13 @@
 	}
 }
 
-static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
+static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
 				const char *name)
 {
 	struct se_portal_group *se_tpg;
 	struct se_session *se_sess;
-	struct tcm_vhost_nexus *tv_nexus;
-	struct tcm_vhost_cmd *tv_cmd;
+	struct vhost_scsi_nexus *tv_nexus;
+	struct vhost_scsi_cmd *tv_cmd;
 	unsigned int i;
 
 	mutex_lock(&tpg->tv_tpg_mutex);
@@ -1881,19 +1895,19 @@
 	}
 	se_tpg = &tpg->se_tpg;
 
-	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
+	tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
 	if (!tv_nexus) {
 		mutex_unlock(&tpg->tv_tpg_mutex);
-		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
+		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
 		return -ENOMEM;
 	}
 	/*
 	 *  Initialize the struct se_session pointer and setup tagpool
-	 *  for struct tcm_vhost_cmd descriptors
+	 *  for struct vhost_scsi_cmd descriptors
 	 */
 	tv_nexus->tvn_se_sess = transport_init_session_tags(
-					TCM_VHOST_DEFAULT_TAGS,
-					sizeof(struct tcm_vhost_cmd),
+					VHOST_SCSI_DEFAULT_TAGS,
+					sizeof(struct vhost_scsi_cmd),
 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
 		mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1901,11 +1915,11 @@
 		return -ENOMEM;
 	}
 	se_sess = tv_nexus->tvn_se_sess;
-	for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
-		tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
+	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
+		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
 
 		tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
-					TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
+					VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
 		if (!tv_cmd->tvc_sgl) {
 			mutex_unlock(&tpg->tv_tpg_mutex);
 			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
@@ -1913,7 +1927,7 @@
 		}
 
 		tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
-					TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
+					VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
 		if (!tv_cmd->tvc_upages) {
 			mutex_unlock(&tpg->tv_tpg_mutex);
 			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
@@ -1921,7 +1935,7 @@
 		}
 
 		tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
-					TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
+					VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
 		if (!tv_cmd->tvc_prot_sgl) {
 			mutex_unlock(&tpg->tv_tpg_mutex);
 			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
@@ -1930,7 +1944,7 @@
 	}
 	/*
 	 * Since we are running in 'demo mode' this call with generate a
-	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
+	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
 	 * the SCSI Initiator port name of the passed configfs group 'name'.
 	 */
 	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
@@ -1953,16 +1967,16 @@
 	return 0;
 
 out:
-	tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
+	vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
 	transport_free_session(se_sess);
 	kfree(tv_nexus);
 	return -ENOMEM;
 }
 
-static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
+static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
 {
 	struct se_session *se_sess;
-	struct tcm_vhost_nexus *tv_nexus;
+	struct vhost_scsi_nexus *tv_nexus;
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tv_nexus = tpg->tpg_nexus;
@@ -1994,10 +2008,10 @@
 	}
 
 	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
-		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
+		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
 
-	tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
+	vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
 	/*
 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
 	 */
@@ -2009,12 +2023,12 @@
 	return 0;
 }
 
-static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
+static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
 					char *page)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_nexus *tv_nexus;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_nexus *tv_nexus;
 	ssize_t ret;
 
 	mutex_lock(&tpg->tv_tpg_mutex);
@@ -2030,40 +2044,40 @@
 	return ret;
 }
 
-static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
+static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
 					 const char *page,
 					 size_t count)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
-	struct tcm_vhost_tport *tport_wwn = tpg->tport;
-	unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tport *tport_wwn = tpg->tport;
+	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
 	int ret;
 	/*
 	 * Shutdown the active I_T nexus if 'NULL' is passed..
 	 */
 	if (!strncmp(page, "NULL", 4)) {
-		ret = tcm_vhost_drop_nexus(tpg);
+		ret = vhost_scsi_drop_nexus(tpg);
 		return (!ret) ? count : ret;
 	}
 	/*
 	 * Otherwise make sure the passed virtual Initiator port WWN matches
-	 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
-	 * tcm_vhost_make_nexus().
+	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
+	 * vhost_scsi_make_nexus().
 	 */
-	if (strlen(page) >= TCM_VHOST_NAMELEN) {
+	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
 		pr_err("Emulated NAA Sas Address: %s, exceeds"
-				" max: %d\n", page, TCM_VHOST_NAMELEN);
+				" max: %d\n", page, VHOST_SCSI_NAMELEN);
 		return -EINVAL;
 	}
-	snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
+	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
 
 	ptr = strstr(i_port, "naa.");
 	if (ptr) {
 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
 			pr_err("Passed SAS Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
-				tcm_vhost_dump_proto_id(tport_wwn));
+				vhost_scsi_dump_proto_id(tport_wwn));
 			return -EINVAL;
 		}
 		port_ptr = &i_port[0];
@@ -2074,7 +2088,7 @@
 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
 			pr_err("Passed FCP Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
-				tcm_vhost_dump_proto_id(tport_wwn));
+				vhost_scsi_dump_proto_id(tport_wwn));
 			return -EINVAL;
 		}
 		port_ptr = &i_port[3]; /* Skip over "fc." */
@@ -2085,7 +2099,7 @@
 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
 			pr_err("Passed iSCSI Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
-				tcm_vhost_dump_proto_id(tport_wwn));
+				vhost_scsi_dump_proto_id(tport_wwn));
 			return -EINVAL;
 		}
 		port_ptr = &i_port[0];
@@ -2101,40 +2115,40 @@
 	if (i_port[strlen(i_port)-1] == '\n')
 		i_port[strlen(i_port)-1] = '\0';
 
-	ret = tcm_vhost_make_nexus(tpg, port_ptr);
+	ret = vhost_scsi_make_nexus(tpg, port_ptr);
 	if (ret < 0)
 		return ret;
 
 	return count;
 }
 
-TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
+TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);
 
-static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
-	&tcm_vhost_tpg_nexus.attr,
+static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
+	&vhost_scsi_tpg_nexus.attr,
 	NULL,
 };
 
 static struct se_portal_group *
-tcm_vhost_make_tpg(struct se_wwn *wwn,
+vhost_scsi_make_tpg(struct se_wwn *wwn,
 		   struct config_group *group,
 		   const char *name)
 {
-	struct tcm_vhost_tport *tport = container_of(wwn,
-			struct tcm_vhost_tport, tport_wwn);
+	struct vhost_scsi_tport *tport = container_of(wwn,
+			struct vhost_scsi_tport, tport_wwn);
 
-	struct tcm_vhost_tpg *tpg;
-	unsigned long tpgt;
+	struct vhost_scsi_tpg *tpg;
+	u16 tpgt;
 	int ret;
 
 	if (strstr(name, "tpgt_") != name)
 		return ERR_PTR(-EINVAL);
-	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
 		return ERR_PTR(-EINVAL);
 
-	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
+	tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
 	if (!tpg) {
-		pr_err("Unable to allocate struct tcm_vhost_tpg");
+		pr_err("Unable to allocate struct vhost_scsi_tpg");
 		return ERR_PTR(-ENOMEM);
 	}
 	mutex_init(&tpg->tv_tpg_mutex);
@@ -2142,31 +2156,31 @@
 	tpg->tport = tport;
 	tpg->tport_tpgt = tpgt;
 
-	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
+	ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
 				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
 	if (ret < 0) {
 		kfree(tpg);
 		return NULL;
 	}
-	mutex_lock(&tcm_vhost_mutex);
-	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
-	mutex_unlock(&tcm_vhost_mutex);
+	mutex_lock(&vhost_scsi_mutex);
+	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
+	mutex_unlock(&vhost_scsi_mutex);
 
 	return &tpg->se_tpg;
 }
 
-static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
+static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
 {
-	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-				struct tcm_vhost_tpg, se_tpg);
+	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+				struct vhost_scsi_tpg, se_tpg);
 
-	mutex_lock(&tcm_vhost_mutex);
+	mutex_lock(&vhost_scsi_mutex);
 	list_del(&tpg->tv_tpg_list);
-	mutex_unlock(&tcm_vhost_mutex);
+	mutex_unlock(&vhost_scsi_mutex);
 	/*
 	 * Release the virtual I_T Nexus for this vhost TPG
 	 */
-	tcm_vhost_drop_nexus(tpg);
+	vhost_scsi_drop_nexus(tpg);
 	/*
 	 * Deregister the se_tpg from TCM..
 	 */
@@ -2175,21 +2189,21 @@
 }
 
 static struct se_wwn *
-tcm_vhost_make_tport(struct target_fabric_configfs *tf,
+vhost_scsi_make_tport(struct target_fabric_configfs *tf,
 		     struct config_group *group,
 		     const char *name)
 {
-	struct tcm_vhost_tport *tport;
+	struct vhost_scsi_tport *tport;
 	char *ptr;
 	u64 wwpn = 0;
 	int off = 0;
 
-	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
 		return ERR_PTR(-EINVAL); */
 
-	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
+	tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
 	if (!tport) {
-		pr_err("Unable to allocate struct tcm_vhost_tport");
+		pr_err("Unable to allocate struct vhost_scsi_tport");
 		return ERR_PTR(-ENOMEM);
 	}
 	tport->tport_wwpn = wwpn;
@@ -2220,102 +2234,102 @@
 	return ERR_PTR(-EINVAL);
 
 check_len:
-	if (strlen(name) >= TCM_VHOST_NAMELEN) {
+	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
 		pr_err("Emulated %s Address: %s, exceeds"
-			" max: %d\n", name, tcm_vhost_dump_proto_id(tport),
-			TCM_VHOST_NAMELEN);
+			" max: %d\n", name, vhost_scsi_dump_proto_id(tport),
+			VHOST_SCSI_NAMELEN);
 		kfree(tport);
 		return ERR_PTR(-EINVAL);
 	}
-	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
+	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
 
 	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
-		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
+		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
 
 	return &tport->tport_wwn;
 }
 
-static void tcm_vhost_drop_tport(struct se_wwn *wwn)
+static void vhost_scsi_drop_tport(struct se_wwn *wwn)
 {
-	struct tcm_vhost_tport *tport = container_of(wwn,
-				struct tcm_vhost_tport, tport_wwn);
+	struct vhost_scsi_tport *tport = container_of(wwn,
+				struct vhost_scsi_tport, tport_wwn);
 
 	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
-		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
+		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
 		tport->tport_name);
 
 	kfree(tport);
 }
 
 static ssize_t
-tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
+vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
 				char *page)
 {
 	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
-		"on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+		"on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
 		utsname()->machine);
 }
 
-TF_WWN_ATTR_RO(tcm_vhost, version);
+TF_WWN_ATTR_RO(vhost_scsi, version);
 
-static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
-	&tcm_vhost_wwn_version.attr,
+static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
+	&vhost_scsi_wwn_version.attr,
 	NULL,
 };
 
-static struct target_core_fabric_ops tcm_vhost_ops = {
-	.get_fabric_name		= tcm_vhost_get_fabric_name,
-	.get_fabric_proto_ident		= tcm_vhost_get_fabric_proto_ident,
-	.tpg_get_wwn			= tcm_vhost_get_fabric_wwn,
-	.tpg_get_tag			= tcm_vhost_get_tag,
-	.tpg_get_default_depth		= tcm_vhost_get_default_depth,
-	.tpg_get_pr_transport_id	= tcm_vhost_get_pr_transport_id,
-	.tpg_get_pr_transport_id_len	= tcm_vhost_get_pr_transport_id_len,
-	.tpg_parse_pr_out_transport_id	= tcm_vhost_parse_pr_out_transport_id,
-	.tpg_check_demo_mode		= tcm_vhost_check_true,
-	.tpg_check_demo_mode_cache	= tcm_vhost_check_true,
-	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
-	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
-	.tpg_alloc_fabric_acl		= tcm_vhost_alloc_fabric_acl,
-	.tpg_release_fabric_acl		= tcm_vhost_release_fabric_acl,
-	.tpg_get_inst_index		= tcm_vhost_tpg_get_inst_index,
-	.release_cmd			= tcm_vhost_release_cmd,
+static struct target_core_fabric_ops vhost_scsi_ops = {
+	.get_fabric_name		= vhost_scsi_get_fabric_name,
+	.get_fabric_proto_ident		= vhost_scsi_get_fabric_proto_ident,
+	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
+	.tpg_get_tag			= vhost_scsi_get_tpgt,
+	.tpg_get_default_depth		= vhost_scsi_get_default_depth,
+	.tpg_get_pr_transport_id	= vhost_scsi_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= vhost_scsi_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= vhost_scsi_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= vhost_scsi_check_true,
+	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
+	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
+	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
+	.tpg_alloc_fabric_acl		= vhost_scsi_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= vhost_scsi_release_fabric_acl,
+	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
+	.release_cmd			= vhost_scsi_release_cmd,
 	.check_stop_free		= vhost_scsi_check_stop_free,
-	.shutdown_session		= tcm_vhost_shutdown_session,
-	.close_session			= tcm_vhost_close_session,
-	.sess_get_index			= tcm_vhost_sess_get_index,
+	.shutdown_session		= vhost_scsi_shutdown_session,
+	.close_session			= vhost_scsi_close_session,
+	.sess_get_index			= vhost_scsi_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
-	.write_pending			= tcm_vhost_write_pending,
-	.write_pending_status		= tcm_vhost_write_pending_status,
-	.set_default_node_attributes	= tcm_vhost_set_default_node_attrs,
-	.get_task_tag			= tcm_vhost_get_task_tag,
-	.get_cmd_state			= tcm_vhost_get_cmd_state,
-	.queue_data_in			= tcm_vhost_queue_data_in,
-	.queue_status			= tcm_vhost_queue_status,
-	.queue_tm_rsp			= tcm_vhost_queue_tm_rsp,
-	.aborted_task			= tcm_vhost_aborted_task,
+	.write_pending			= vhost_scsi_write_pending,
+	.write_pending_status		= vhost_scsi_write_pending_status,
+	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
+	.get_task_tag			= vhost_scsi_get_task_tag,
+	.get_cmd_state			= vhost_scsi_get_cmd_state,
+	.queue_data_in			= vhost_scsi_queue_data_in,
+	.queue_status			= vhost_scsi_queue_status,
+	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
+	.aborted_task			= vhost_scsi_aborted_task,
 	/*
 	 * Setup callers for generic logic in target_core_fabric_configfs.c
 	 */
-	.fabric_make_wwn		= tcm_vhost_make_tport,
-	.fabric_drop_wwn		= tcm_vhost_drop_tport,
-	.fabric_make_tpg		= tcm_vhost_make_tpg,
-	.fabric_drop_tpg		= tcm_vhost_drop_tpg,
-	.fabric_post_link		= tcm_vhost_port_link,
-	.fabric_pre_unlink		= tcm_vhost_port_unlink,
+	.fabric_make_wwn		= vhost_scsi_make_tport,
+	.fabric_drop_wwn		= vhost_scsi_drop_tport,
+	.fabric_make_tpg		= vhost_scsi_make_tpg,
+	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
+	.fabric_post_link		= vhost_scsi_port_link,
+	.fabric_pre_unlink		= vhost_scsi_port_unlink,
 	.fabric_make_np			= NULL,
 	.fabric_drop_np			= NULL,
-	.fabric_make_nodeacl		= tcm_vhost_make_nodeacl,
-	.fabric_drop_nodeacl		= tcm_vhost_drop_nodeacl,
+	.fabric_make_nodeacl		= vhost_scsi_make_nodeacl,
+	.fabric_drop_nodeacl		= vhost_scsi_drop_nodeacl,
 };
 
-static int tcm_vhost_register_configfs(void)
+static int vhost_scsi_register_configfs(void)
 {
 	struct target_fabric_configfs *fabric;
 	int ret;
 
-	pr_debug("TCM_VHOST fabric module %s on %s/%s"
-		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+	pr_debug("vhost-scsi fabric module %s on %s/%s"
+		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
 		utsname()->machine);
 	/*
 	 * Register the top level struct config_item_type with TCM core
@@ -2326,14 +2340,14 @@
 		return PTR_ERR(fabric);
 	}
 	/*
-	 * Setup fabric->tf_ops from our local tcm_vhost_ops
+	 * Setup fabric->tf_ops from our local vhost_scsi_ops
 	 */
-	fabric->tf_ops = tcm_vhost_ops;
+	fabric->tf_ops = vhost_scsi_ops;
 	/*
 	 * Setup default attribute lists for various fabric->tf_cit_tmpl
 	 */
-	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
-	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs;
+	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs;
 	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
 	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
 	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
@@ -2353,37 +2367,37 @@
 	/*
 	 * Setup our local pointer to *fabric
 	 */
-	tcm_vhost_fabric_configfs = fabric;
-	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
+	vhost_scsi_fabric_configfs = fabric;
+	pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n");
 	return 0;
 };
 
-static void tcm_vhost_deregister_configfs(void)
+static void vhost_scsi_deregister_configfs(void)
 {
-	if (!tcm_vhost_fabric_configfs)
+	if (!vhost_scsi_fabric_configfs)
 		return;
 
-	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
-	tcm_vhost_fabric_configfs = NULL;
-	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
+	target_fabric_configfs_deregister(vhost_scsi_fabric_configfs);
+	vhost_scsi_fabric_configfs = NULL;
+	pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n");
 };
 
-static int __init tcm_vhost_init(void)
+static int __init vhost_scsi_init(void)
 {
 	int ret = -ENOMEM;
 	/*
 	 * Use our own dedicated workqueue for submitting I/O into
 	 * target core to avoid contention within system_wq.
 	 */
-	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
-	if (!tcm_vhost_workqueue)
+	vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
+	if (!vhost_scsi_workqueue)
 		goto out;
 
 	ret = vhost_scsi_register();
 	if (ret < 0)
 		goto out_destroy_workqueue;
 
-	ret = tcm_vhost_register_configfs();
+	ret = vhost_scsi_register_configfs();
 	if (ret < 0)
 		goto out_vhost_scsi_deregister;
 
@@ -2392,20 +2406,20 @@
 out_vhost_scsi_deregister:
 	vhost_scsi_deregister();
 out_destroy_workqueue:
-	destroy_workqueue(tcm_vhost_workqueue);
+	destroy_workqueue(vhost_scsi_workqueue);
 out:
 	return ret;
 };
 
-static void tcm_vhost_exit(void)
+static void vhost_scsi_exit(void)
 {
-	tcm_vhost_deregister_configfs();
+	vhost_scsi_deregister_configfs();
 	vhost_scsi_deregister();
-	destroy_workqueue(tcm_vhost_workqueue);
+	destroy_workqueue(vhost_scsi_workqueue);
 };
 
 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
 MODULE_ALIAS("tcm_vhost");
 MODULE_LICENSE("GPL");
-module_init(tcm_vhost_init);
-module_exit(tcm_vhost_exit);
+module_init(vhost_scsi_init);
+module_exit(vhost_scsi_exit);
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 32c0b6b..9362424 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -599,6 +599,9 @@
 
 	len = clcdfb_snprintf_mode(NULL, 0, mode);
 	name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+
 	clcdfb_snprintf_mode(name, len + 1, mode);
 	mode->name = name;
 
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 9533859..868facd 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -624,9 +624,6 @@
 	int num = 0, i, first = 1;
 	int ver, rev;
 
-	ver = edid[EDID_STRUCT_VERSION];
-	rev = edid[EDID_STRUCT_REVISION];
-
 	mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
 	if (mode == NULL)
 		return NULL;
@@ -637,6 +634,9 @@
 		return NULL;
 	}
 
+	ver = edid[EDID_STRUCT_VERSION];
+	rev = edid[EDID_STRUCT_REVISION];
+
 	*dbsize = 0;
 
 	DPRINTK("   Detailed Timings\n");
diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c
index 5a2095a..1218655 100644
--- a/drivers/video/fbdev/omap2/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c
@@ -28,44 +28,22 @@
 #include <video/omapdss.h>
 #include "dss.h"
 
-static struct omap_dss_device *to_dss_device_sysfs(struct device *dev)
+static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
 {
-	struct omap_dss_device *dssdev = NULL;
-
-	for_each_dss_dev(dssdev) {
-		if (dssdev->dev == dev) {
-			omap_dss_put_device(dssdev);
-			return dssdev;
-		}
-	}
-
-	return NULL;
-}
-
-static ssize_t display_name_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
-
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			dssdev->name ?
 			dssdev->name : "");
 }
 
-static ssize_t display_enabled_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
-
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			omapdss_device_is_enabled(dssdev));
 }
 
-static ssize_t display_enabled_store(struct device *dev,
-		struct device_attribute *attr,
+static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
 		const char *buf, size_t size)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	int r;
 	bool enable;
 
@@ -90,19 +68,16 @@
 	return size;
 }
 
-static ssize_t display_tear_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			dssdev->driver->get_te ?
 			dssdev->driver->get_te(dssdev) : 0);
 }
 
-static ssize_t display_tear_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_tear_store(struct omap_dss_device *dssdev,
+	const char *buf, size_t size)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	int r;
 	bool te;
 
@@ -120,10 +95,8 @@
 	return size;
 }
 
-static ssize_t display_timings_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	struct omap_video_timings t;
 
 	if (!dssdev->driver->get_timings)
@@ -137,10 +110,9 @@
 			t.y_res, t.vfp, t.vbp, t.vsw);
 }
 
-static ssize_t display_timings_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_timings_store(struct omap_dss_device *dssdev,
+	const char *buf, size_t size)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	struct omap_video_timings t = dssdev->panel.timings;
 	int r, found;
 
@@ -176,10 +148,8 @@
 	return size;
 }
 
-static ssize_t display_rotate_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	int rotate;
 	if (!dssdev->driver->get_rotate)
 		return -ENOENT;
@@ -187,10 +157,9 @@
 	return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
 }
 
-static ssize_t display_rotate_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_rotate_store(struct omap_dss_device *dssdev,
+	const char *buf, size_t size)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	int rot, r;
 
 	if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
@@ -207,10 +176,8 @@
 	return size;
 }
 
-static ssize_t display_mirror_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	int mirror;
 	if (!dssdev->driver->get_mirror)
 		return -ENOENT;
@@ -218,10 +185,9 @@
 	return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
 }
 
-static ssize_t display_mirror_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
+	const char *buf, size_t size)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	int r;
 	bool mirror;
 
@@ -239,10 +205,8 @@
 	return size;
 }
 
-static ssize_t display_wss_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	unsigned int wss;
 
 	if (!dssdev->driver->get_wss)
@@ -253,10 +217,9 @@
 	return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
 }
 
-static ssize_t display_wss_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t display_wss_store(struct omap_dss_device *dssdev,
+	const char *buf, size_t size)
 {
-	struct omap_dss_device *dssdev = to_dss_device_sysfs(dev);
 	u32 wss;
 	int r;
 
@@ -277,50 +240,94 @@
 	return size;
 }
 
-static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL);
-static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
+struct display_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct omap_dss_device *, char *);
+	ssize_t	(*store)(struct omap_dss_device *, const char *, size_t);
+};
+
+#define DISPLAY_ATTR(_name, _mode, _show, _store) \
+	struct display_attribute display_attr_##_name = \
+	__ATTR(_name, _mode, _show, _store)
+
+static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL);
+static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL);
+static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
 		display_enabled_show, display_enabled_store);
-static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR,
 		display_tear_show, display_tear_store);
-static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR,
 		display_timings_show, display_timings_store);
-static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR,
 		display_rotate_show, display_rotate_store);
-static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR,
 		display_mirror_show, display_mirror_store);
-static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
+static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR,
 		display_wss_show, display_wss_store);
 
-static const struct attribute *display_sysfs_attrs[] = {
-	&dev_attr_display_name.attr,
-	&dev_attr_enabled.attr,
-	&dev_attr_tear_elim.attr,
-	&dev_attr_timings.attr,
-	&dev_attr_rotate.attr,
-	&dev_attr_mirror.attr,
-	&dev_attr_wss.attr,
+static struct attribute *display_sysfs_attrs[] = {
+	&display_attr_name.attr,
+	&display_attr_display_name.attr,
+	&display_attr_enabled.attr,
+	&display_attr_tear_elim.attr,
+	&display_attr_timings.attr,
+	&display_attr_rotate.attr,
+	&display_attr_mirror.attr,
+	&display_attr_wss.attr,
 	NULL
 };
 
+static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr,
+		char *buf)
+{
+	struct omap_dss_device *dssdev;
+	struct display_attribute *display_attr;
+
+	dssdev = container_of(kobj, struct omap_dss_device, kobj);
+	display_attr = container_of(attr, struct display_attribute, attr);
+
+	if (!display_attr->show)
+		return -ENOENT;
+
+	return display_attr->show(dssdev, buf);
+}
+
+static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr,
+		const char *buf, size_t size)
+{
+	struct omap_dss_device *dssdev;
+	struct display_attribute *display_attr;
+
+	dssdev = container_of(kobj, struct omap_dss_device, kobj);
+	display_attr = container_of(attr, struct display_attribute, attr);
+
+	if (!display_attr->store)
+		return -ENOENT;
+
+	return display_attr->store(dssdev, buf, size);
+}
+
+static const struct sysfs_ops display_sysfs_ops = {
+	.show = display_attr_show,
+	.store = display_attr_store,
+};
+
+static struct kobj_type display_ktype = {
+	.sysfs_ops = &display_sysfs_ops,
+	.default_attrs = display_sysfs_attrs,
+};
+
 int display_init_sysfs(struct platform_device *pdev)
 {
 	struct omap_dss_device *dssdev = NULL;
 	int r;
 
 	for_each_dss_dev(dssdev) {
-		struct kobject *kobj = &dssdev->dev->kobj;
-
-		r = sysfs_create_files(kobj, display_sysfs_attrs);
+		r = kobject_init_and_add(&dssdev->kobj, &display_ktype,
+			&pdev->dev.kobj, dssdev->alias);
 		if (r) {
 			DSSERR("failed to create sysfs files\n");
-			goto err;
-		}
-
-		r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias);
-		if (r) {
-			sysfs_remove_files(kobj, display_sysfs_attrs);
-
-			DSSERR("failed to create sysfs display link\n");
+			omap_dss_put_device(dssdev);
 			goto err;
 		}
 	}
@@ -338,8 +345,12 @@
 	struct omap_dss_device *dssdev = NULL;
 
 	for_each_dss_dev(dssdev) {
-		sysfs_remove_link(&pdev->dev.kobj, dssdev->alias);
-		sysfs_remove_files(&dssdev->dev->kobj,
-				display_sysfs_attrs);
+		if (kobject_name(&dssdev->kobj) == NULL)
+			continue;
+
+		kobject_del(&dssdev->kobj);
+		kobject_put(&dssdev->kobj);
+
+		memset(&dssdev->kobj, 0, sizeof(dssdev->kobj));
 	}
 }
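
The display-sysfs rework above drops the per-device DEVICE_ATTR wrappers
and routes every show/store through display_attr_show() and
display_attr_store(), which recover the typed omap_dss_device and
display_attribute from the generic kobject and attribute pointers with
container_of(). As a minimal, hedged userspace sketch of that dispatch
pattern (the struct and attribute names below are invented for
illustration, not kernel API):

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute {		/* generic handle, as sysfs would see it */
	const char *name;
};

struct my_obj {			/* plays the role of omap_dss_device */
	const char *label;
	struct attribute kobj_like;	/* embedded generic part */
};

struct my_attr {		/* plays the role of display_attribute */
	struct attribute attr;		/* embedded generic part */
	void (*show)(struct my_obj *obj);
};

static void show_label(struct my_obj *obj)
{
	printf("label = %s\n", obj->label);
}

static struct my_attr label_attr = {
	.attr = { .name = "label" },
	.show = show_label,
};

/* Trampoline: only generic pointers come in; container_of recovers both
 * the typed attribute and the typed object, like display_attr_show(). */
static void dispatch(struct attribute *kobj_like, struct attribute *attr)
{
	struct my_obj *obj = container_of(kobj_like, struct my_obj, kobj_like);
	struct my_attr *a = container_of(attr, struct my_attr, attr);

	if (a->show)
		a->show(obj);
}

int main(void)
{
	struct my_obj panel = { .label = "lcd0", .kobj_like = { .name = "panel" } };

	dispatch(&panel.kobj_like, &label_attr.attr);
	return 0;
}
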
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0413157..6a356e3 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/balloon_compaction.h>
 #include <linux/oom.h>
+#include <linux/wait.h>
 
 /*
  * Balloon device works in 4K page units.  So each page is pointed to by
@@ -334,17 +335,25 @@
 static int balloon(void *_vballoon)
 {
 	struct virtio_balloon *vb = _vballoon;
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	set_freezable();
 	while (!kthread_should_stop()) {
 		s64 diff;
 
 		try_to_freeze();
-		wait_event_interruptible(vb->config_change,
-					 (diff = towards_target(vb)) != 0
-					 || vb->need_stats_update
-					 || kthread_should_stop()
-					 || freezing(current));
+
+		add_wait_queue(&vb->config_change, &wait);
+		for (;;) {
+			if ((diff = towards_target(vb)) != 0 ||
+			    vb->need_stats_update ||
+			    kthread_should_stop() ||
+			    freezing(current))
+				break;
+			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+		}
+		remove_wait_queue(&vb->config_change, &wait);
+
 		if (vb->need_stats_update)
 			stats_handle_request(vb);
 		if (diff > 0)
@@ -499,6 +508,8 @@
 	if (err < 0)
 		goto out_oom_notify;
 
+	virtio_device_ready(vdev);
+
 	vb->thread = kthread_run(balloon, vb, "vballoon");
 	if (IS_ERR(vb->thread)) {
 		err = PTR_ERR(vb->thread);
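
The reworked wait loop in balloon() above replaces
wait_event_interruptible() with an explicit add_wait_queue()/wait_woken()
sequence: the wake-up conditions are evaluated while the thread is still
fully runnable and are re-checked on every pass before sleeping. Below is
a rough userspace analogue of that discipline only, using a pthread
condition variable rather than the kernel's wait queues; need_work and
worker() are invented names for the sketch.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int need_work;		/* condition the worker waits for */

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	/* Re-check the condition every time we are woken: a wakeup that
	 * races with the check cannot be lost because the check happens
	 * under the same lock the waker takes. */
	while (!need_work)
		pthread_cond_wait(&cond, &lock);
	need_work = 0;
	pthread_mutex_unlock(&lock);
	printf("worker: got work\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);
	pthread_mutex_lock(&lock);
	need_work = 1;			/* publish the condition ... */
	pthread_cond_signal(&cond);	/* ... then wake the worker */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
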
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index cad5698..6010d7e 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -156,22 +156,95 @@
 		   void *buf, unsigned len)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-	u8 *ptr = buf;
-	int i;
+	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
+	u8 b;
+	__le16 w;
+	__le32 l;
 
-	for (i = 0; i < len; i++)
-		ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
+	if (vm_dev->version == 1) {
+		u8 *ptr = buf;
+		int i;
+
+		for (i = 0; i < len; i++)
+			ptr[i] = readb(base + offset + i);
+		return;
+	}
+
+	switch (len) {
+	case 1:
+		b = readb(base + offset);
+		memcpy(buf, &b, sizeof b);
+		break;
+	case 2:
+		w = cpu_to_le16(readw(base + offset));
+		memcpy(buf, &w, sizeof w);
+		break;
+	case 4:
+		l = cpu_to_le32(readl(base + offset));
+		memcpy(buf, &l, sizeof l);
+		break;
+	case 8:
+		l = cpu_to_le32(readl(base + offset));
+		memcpy(buf, &l, sizeof l);
+		l = cpu_to_le32(ioread32(base + offset + sizeof l));
+		memcpy(buf + sizeof l, &l, sizeof l);
+		break;
+	default:
+		BUG();
+	}
 }
 
 static void vm_set(struct virtio_device *vdev, unsigned offset,
 		   const void *buf, unsigned len)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-	const u8 *ptr = buf;
-	int i;
+	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
+	u8 b;
+	__le16 w;
+	__le32 l;
 
-	for (i = 0; i < len; i++)
-		writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
+	if (vm_dev->version == 1) {
+		const u8 *ptr = buf;
+		int i;
+
+		for (i = 0; i < len; i++)
+			writeb(ptr[i], base + offset + i);
+
+		return;
+	}
+
+	switch (len) {
+	case 1:
+		memcpy(&b, buf, sizeof b);
+		writeb(b, base + offset);
+		break;
+	case 2:
+		memcpy(&w, buf, sizeof w);
+		writew(le16_to_cpu(w), base + offset);
+		break;
+	case 4:
+		memcpy(&l, buf, sizeof l);
+		writel(le32_to_cpu(l), base + offset);
+		break;
+	case 8:
+		memcpy(&l, buf, sizeof l);
+		writel(le32_to_cpu(l), base + offset);
+		memcpy(&l, buf + sizeof l, sizeof l);
+		writel(le32_to_cpu(l), base + offset + sizeof l);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static u32 vm_generation(struct virtio_device *vdev)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+	if (vm_dev->version == 1)
+		return 0;
+	else
+		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
 }
 
 static u8 vm_get_status(struct virtio_device *vdev)
@@ -440,6 +513,7 @@
 static const struct virtio_config_ops virtio_mmio_config_ops = {
 	.get		= vm_get,
 	.set		= vm_set,
+	.generation	= vm_generation,
 	.get_status	= vm_get_status,
 	.set_status	= vm_set_status,
 	.reset		= vm_reset,
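
With version 2 of the virtio-mmio layout, vm_get() and vm_set() above
access the config window with naturally sized accesses, and an 8-byte
field is handled as two 32-bit halves with the low word at the lower
offset. The sketch below shows only the split/join arithmetic under that
assumption; the driver additionally converts each half with
cpu_to_le32()/le32_to_cpu() so the bytes are little-endian on the wire
regardless of host endianness.

#include <stdio.h>
#include <stdint.h>

/* Rebuild a 64-bit config value from two 32-bit halves, low word first. */
static uint64_t join_halves(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

/* Split a 64-bit value into the halves written at offset and offset + 4. */
static void split_halves(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)val;
	*hi = (uint32_t)(val >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	split_halves(0x1122334455667788ull, &lo, &hi);
	printf("lo=0x%08x hi=0x%08x rejoined=0x%016llx\n",
	       (unsigned)lo, (unsigned)hi,
	       (unsigned long long)join_halves(lo, hi));
	return 0;
}
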
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 6df9405..1443b3c 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -208,7 +208,8 @@
 
 	if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
 		err = request_irq(wdt->irq, wdt_interrupt,
-				  IRQF_SHARED | IRQF_IRQPOLL,
+				  IRQF_SHARED | IRQF_IRQPOLL |
+				  IRQF_NO_SUSPEND,
 				  pdev->name, wdt);
 		if (err)
 			return err;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 2140398..2ccd359 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -2,7 +2,7 @@
 obj-$(CONFIG_HOTPLUG_CPU)		+= cpu_hotplug.o
 endif
 obj-$(CONFIG_X86)			+= fallback.o
-obj-y	+= grant-table.o features.o balloon.o manage.o
+obj-y	+= grant-table.o features.o balloon.o manage.o preempt.o
 obj-y	+= events/
 obj-y	+= xenbus/
 
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b4bca2d..70fba97 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -526,20 +526,26 @@
 	pirq_query_unmask(irq);
 
 	rc = set_evtchn_to_irq(evtchn, irq);
-	if (rc != 0) {
-		pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
-		       irq, rc);
-		xen_evtchn_close(evtchn);
-		return 0;
-	}
+	if (rc)
+		goto err;
+
 	bind_evtchn_to_cpu(evtchn, 0);
 	info->evtchn = evtchn;
 
+	rc = xen_evtchn_port_setup(info);
+	if (rc)
+		goto err;
+
 out:
 	unmask_evtchn(evtchn);
 	eoi_pirq(irq_get_irq_data(irq));
 
 	return 0;
+
+err:
+	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
+	xen_evtchn_close(evtchn);
+	return 0;
 }
 
 static unsigned int startup_pirq(struct irq_data *data)
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
new file mode 100644
index 0000000..a1800c1
--- /dev/null
+++ b/drivers/xen/preempt.c
@@ -0,0 +1,44 @@
+/*
+ * Preemptible hypercalls
+ *
+ * Copyright (C) 2014 Citrix Systems R&D ltd.
+ *
+ * This source code is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <xen/xen-ops.h>
+
+#ifndef CONFIG_PREEMPT
+
+/*
+ * Some hypercalls issued by the toolstack can take many 10s of
+ * seconds. Allow tasks running hypercalls via the privcmd driver to
+ * be voluntarily preempted even if full kernel preemption is
+ * disabled.
+ *
+ * Such preemptible hypercalls are bracketed by
+ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+ * calls.
+ */
+
+DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+asmlinkage __visible void xen_maybe_preempt_hcall(void)
+{
+	if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
+		     && should_resched())) {
+		/*
+		 * Clear flag as we may be rescheduled on a different
+		 * cpu.
+		 */
+		__this_cpu_write(xen_in_preemptible_hcall, false);
+		_cond_resched();
+		__this_cpu_write(xen_in_preemptible_hcall, true);
+	}
+}
+#endif /* CONFIG_PREEMPT */
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 569a13b..59ac71c 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -56,10 +56,12 @@
 	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
 		return -EFAULT;
 
+	xen_preemptible_hcall_begin();
 	ret = privcmd_call(hypercall.op,
 			   hypercall.arg[0], hypercall.arg[1],
 			   hypercall.arg[2], hypercall.arg[3],
 			   hypercall.arg[4]);
+	xen_preemptible_hcall_end();
 
 	return ret;
 }
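
Together with the new drivers/xen/preempt.c above, this brackets the
potentially very long privcmd hypercall with
xen_preemptible_hcall_begin()/xen_preemptible_hcall_end(), letting
xen_maybe_preempt_hcall() reschedule voluntarily on kernels built without
CONFIG_PREEMPT. The following is only a loose userspace analogy of the
bracketing idea, using a thread-local flag plus a cooperative yield
point; none of these names exist in the kernel.

#include <stdio.h>
#include <sched.h>

static __thread int in_long_operation;	/* stands in for the per-CPU flag */

static void long_op_begin(void) { in_long_operation = 1; }
static void long_op_end(void)   { in_long_operation = 0; }

/* A cooperative check point: only yields while the flag is set, the way
 * xen_maybe_preempt_hcall() only reschedules inside the bracketed region. */
static void maybe_yield(void)
{
	if (in_long_operation) {
		/* mirrors the kernel clearing its per-CPU flag before
		 * rescheduling, since it may resume on another CPU */
		in_long_operation = 0;
		sched_yield();
		in_long_operation = 1;
	}
}

int main(void)
{
	long_op_begin();
	for (int i = 0; i < 3; i++)
		maybe_yield();	/* periodic chances to give up the CPU */
	long_op_end();
	printf("done\n");
	return 0;
}
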
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 46ae0f9..75fe3d4 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -16,7 +16,7 @@
 #include "conf_space.h"
 #include "conf_space_quirks.h"
 
-static bool permissive;
+bool permissive;
 module_param(permissive, bool, 0644);
 
 /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index e56c934..2e1d73d 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -64,6 +64,8 @@
 	void *data;
 };
 
+extern bool permissive;
+
 #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
 
 /* Add fields to a device - the add_fields macro expects to get a pointer to
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index c5ee825..2d73693 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -11,6 +11,10 @@
 #include "pciback.h"
 #include "conf_space.h"
 
+struct pci_cmd_info {
+	u16 val;
+};
+
 struct pci_bar_info {
 	u32 val;
 	u32 len_val;
@@ -20,21 +24,35 @@
 #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
 #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
 
+/* Bits guests are allowed to control in permissive mode. */
+#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
+			   PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
+			   PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)
+
+static void *command_init(struct pci_dev *dev, int offset)
+{
+	struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+	int err;
+
+	if (!cmd)
+		return ERR_PTR(-ENOMEM);
+
+	err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
+	if (err) {
+		kfree(cmd);
+		return ERR_PTR(err);
+	}
+
+	return cmd;
+}
+
 static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
 {
-	int i;
-	int ret;
+	int ret = pci_read_config_word(dev, offset, value);
+	const struct pci_cmd_info *cmd = data;
 
-	ret = xen_pcibk_read_config_word(dev, offset, value, data);
-	if (!pci_is_enabled(dev))
-		return ret;
-
-	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
-		if (dev->resource[i].flags & IORESOURCE_IO)
-			*value |= PCI_COMMAND_IO;
-		if (dev->resource[i].flags & IORESOURCE_MEM)
-			*value |= PCI_COMMAND_MEMORY;
-	}
+	*value &= PCI_COMMAND_GUEST;
+	*value |= cmd->val & ~PCI_COMMAND_GUEST;
 
 	return ret;
 }
@@ -43,6 +61,8 @@
 {
 	struct xen_pcibk_dev_data *dev_data;
 	int err;
+	u16 val;
+	struct pci_cmd_info *cmd = data;
 
 	dev_data = pci_get_drvdata(dev);
 	if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
@@ -83,6 +103,19 @@
 		}
 	}
 
+	cmd->val = value;
+
+	if (!permissive && (!dev_data || !dev_data->permissive))
+		return 0;
+
+	/* Only allow the guest to control certain bits. */
+	err = pci_read_config_word(dev, offset, &val);
+	if (err || val == value)
+		return err;
+
+	value &= PCI_COMMAND_GUEST;
+	value |= val & ~PCI_COMMAND_GUEST;
+
 	return pci_write_config_word(dev, offset, value);
 }
 
@@ -282,6 +315,8 @@
 	{
 	 .offset    = PCI_COMMAND,
 	 .size      = 2,
+	 .init      = command_init,
+	 .release   = bar_release,
 	 .u.w.read  = command_read,
 	 .u.w.write = command_write,
 	},
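
The new PCI_COMMAND handling above boils down to a masked merge: the
guest fully controls only the bits in PCI_COMMAND_GUEST, and
command_read()/command_write() combine the guest's view with the real
register value around that mask. A minimal standalone illustration of
the merge itself follows; the mask constant below is made up for the
example, not the real PCI_COMMAND_GUEST.

#include <stdio.h>
#include <stdint.h>

/* Bits inside 'mask' come from 'controlled', the rest from 'preserved',
 * the same shape as command_read()/command_write() above. */
static uint16_t masked_merge(uint16_t controlled, uint16_t preserved,
			     uint16_t mask)
{
	return (uint16_t)((controlled & mask) | (preserved & ~mask));
}

int main(void)
{
	const uint16_t mask = 0x0354;	/* illustrative mask only */
	const uint16_t from_guest = 0xffff;	/* everything the guest tried to set */
	const uint16_t from_host  = 0x0007;	/* current hardware value */

	/* What actually reaches the device: guest-controlled bits from the
	 * guest, everything else preserved from the hardware value. */
	printf("written: 0x%04x\n",
	       (unsigned)masked_merge(from_guest, from_host, mask));
	return 0;
}
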
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 61653a0..9faca6a 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -709,12 +709,11 @@
 static int scsiback_do_cmd_fn(struct vscsibk_info *info)
 {
 	struct vscsiif_back_ring *ring = &info->ring;
-	struct vscsiif_request *ring_req;
+	struct vscsiif_request ring_req;
 	struct vscsibk_pend *pending_req;
 	RING_IDX rc, rp;
 	int err, more_to_do;
 	uint32_t result;
-	uint8_t act;
 
 	rc = ring->req_cons;
 	rp = ring->sring->req_prod;
@@ -735,11 +734,10 @@
 		if (!pending_req)
 			return 1;
 
-		ring_req = RING_GET_REQUEST(ring, rc);
+		ring_req = *RING_GET_REQUEST(ring, rc);
 		ring->req_cons = ++rc;
 
-		act = ring_req->act;
-		err = prepare_pending_reqs(info, ring_req, pending_req);
+		err = prepare_pending_reqs(info, &ring_req, pending_req);
 		if (err) {
 			switch (err) {
 			case -ENODEV:
@@ -755,9 +753,9 @@
 			return 1;
 		}
 
-		switch (act) {
+		switch (ring_req.act) {
 		case VSCSIIF_ACT_SCSI_CDB:
-			if (scsiback_gnttab_data_map(ring_req, pending_req)) {
+			if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
 				scsiback_fast_flush_area(pending_req);
 				scsiback_do_resp_with_sense(NULL,
 					DRIVER_ERROR << 24, 0, pending_req);
@@ -768,7 +766,7 @@
 			break;
 		case VSCSIIF_ACT_SCSI_ABORT:
 			scsiback_device_action(pending_req, TMR_ABORT_TASK,
-				ring_req->ref_rqid);
+				ring_req.ref_rqid);
 			break;
 		case VSCSIIF_ACT_SCSI_RESET:
 			scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
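
scsiback_do_cmd_fn() above now copies the whole request descriptor out of
the shared ring into a local vscsiif_request before validating or acting
on it, so every later access (including the switch on ring_req.act) sees
one consistent snapshot even if the frontend keeps writing to the shared
page. A hedged, generic sketch of that copy-then-use pattern, with
invented names:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct request {		/* stand-in for vscsiif_request */
	uint8_t act;
	uint32_t len;
};

/* Ring slot shared with the (untrusted) other side. */
static struct request shared_slot;

static int handle(const struct request *req)
{
	/* Every check and every use sees the same snapshot. */
	if (req->len > 4096)
		return -1;
	printf("act=%u len=%u\n", req->act, req->len);
	return 0;
}

int main(void)
{
	struct request local;

	shared_slot.act = 1;
	shared_slot.len = 512;

	/* Snapshot the descriptor once; never read the shared copy again. */
	memcpy(&local, &shared_slot, sizeof(local));

	return handle(&local) ? 1 : 0;
}
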
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 9ee5343..3662f1d 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1127,7 +1127,7 @@
 	}
 
 	/* Write all dirty data */
-	if (S_ISREG(dentry->d_inode->i_mode))
+	if (d_is_reg(dentry))
 		filemap_write_and_wait(dentry->d_inode->i_mapping);
 
 	retval = p9_client_wstat(fid, &wstat);
diff --git a/fs/aio.c b/fs/aio.c
index 118a2e0..f8e52a1 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1285,7 +1285,7 @@
 
 	ret = -EINVAL;
 	if (unlikely(ctx || nr_events == 0)) {
-		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+		pr_debug("EINVAL: ctx %lu nr_events %u\n",
 		         ctx, nr_events);
 		goto out;
 	}
@@ -1333,7 +1333,7 @@
 
 		return ret;
 	}
-	pr_debug("EINVAL: io_destroy: invalid context id\n");
+	pr_debug("EINVAL: invalid context id\n");
 	return -EINVAL;
 }
 
@@ -1515,7 +1515,7 @@
 	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
 	    ((ssize_t)iocb->aio_nbytes < 0)
 	   )) {
-		pr_debug("EINVAL: io_submit: overflow check\n");
+		pr_debug("EINVAL: overflow check\n");
 		return -EINVAL;
 	}
 
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index aaf96cb..ac7d921 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -95,7 +95,7 @@
  */
 static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
 {
-	struct autofs_dev_ioctl tmp;
+	struct autofs_dev_ioctl tmp, *res;
 
 	if (copy_from_user(&tmp, in, sizeof(tmp)))
 		return ERR_PTR(-EFAULT);
@@ -106,7 +106,11 @@
 	if (tmp.size > (PATH_MAX + sizeof(tmp)))
 		return ERR_PTR(-ENAMETOOLONG);
 
-	return memdup_user(in, tmp.size);
+	res = memdup_user(in, tmp.size);
+	if (!IS_ERR(res))
+		res->size = tmp.size;
+
+	return res;
 }
 
 static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
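
copy_dev_ioctl() above now remembers the size it validated from the
header copy and writes it back into the buffer duplicated by
memdup_user(), so the rest of the ioctl path sizes its accesses by the
value that was actually used for the allocation rather than whatever
currently sits in userspace memory. A hedged userspace sketch of that
header-validate-then-duplicate sequence; struct ioctl_hdr, dup_msg() and
MAX_SIZE are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ioctl_hdr {
	size_t size;		/* total size, header plus payload */
	char payload[];		/* variable-length tail */
};

#define MAX_SIZE (sizeof(struct ioctl_hdr) + 64)

/* Duplicate a variable-sized message from an untrusted buffer. */
static struct ioctl_hdr *dup_msg(const void *user, size_t user_len)
{
	struct ioctl_hdr hdr, *copy;

	if (user_len < sizeof(hdr))
		return NULL;
	memcpy(&hdr, user, sizeof(hdr));	/* header only, to validate */

	if (hdr.size < sizeof(hdr) || hdr.size > MAX_SIZE || hdr.size > user_len)
		return NULL;

	copy = malloc(hdr.size);
	if (!copy)
		return NULL;
	memcpy(copy, user, hdr.size);		/* full message */
	copy->size = hdr.size;			/* keep the validated value */
	return copy;
}

int main(void)
{
	size_t total = sizeof(struct ioctl_hdr) + 8;
	struct ioctl_hdr *msg = malloc(total);
	struct ioctl_hdr *copy;

	if (!msg)
		return 1;
	msg->size = total;
	memcpy(msg->payload, "autofs", 7);

	copy = dup_msg(msg, total);
	if (copy) {
		printf("copied %zu bytes: %s\n", copy->size, copy->payload);
		free(copy);
	}
	free(msg);
	return 0;
}
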
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index bfdbaba..11dd118 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -374,7 +374,7 @@
 		return NULL;
 	}
 
-	if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
+	if (dentry->d_inode && d_is_symlink(dentry)) {
 		DPRINTK("checking symlink %p %pd", dentry, dentry);
 		/*
 		 * A symlink can't be "busy" in the usual sense so
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index dbb5b72..7e44fdd 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -108,7 +108,7 @@
 	struct dentry *dentry = file->f_path.dentry;
 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
 
-	DPRINTK("file=%p dentry=%p %pD", file, dentry, dentry);
+	DPRINTK("file=%p dentry=%p %pd", file, dentry, dentry);
 
 	if (autofs4_oz_mode(sbi))
 		goto out;
@@ -371,7 +371,7 @@
 	 * having d_mountpoint() true, so there's no need to call back
 	 * to the daemon.
 	 */
-	if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
+	if (dentry->d_inode && d_is_symlink(dentry)) {
 		spin_unlock(&sbi->fs_lock);
 		goto done;
 	}
@@ -485,7 +485,7 @@
 		 * an incorrect ELOOP error return.
 		 */
 		if ((!d_mountpoint(dentry) && !simple_empty(dentry)) ||
-		    (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
+		    (dentry->d_inode && d_is_symlink(dentry)))
 			status = -EISDIR;
 	}
 	spin_unlock(&sbi->fs_lock);
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index afd2b44..861b1e1 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -15,161 +15,14 @@
 #include <linux/namei.h>
 #include <linux/poll.h>
 
-
-static loff_t bad_file_llseek(struct file *file, loff_t offset, int whence)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_read(struct file *filp, char __user *buf,
-			size_t size, loff_t *ppos)
-{
-        return -EIO;
-}
-
-static ssize_t bad_file_write(struct file *filp, const char __user *buf,
-			size_t siz, loff_t *ppos)
-{
-        return -EIO;
-}
-
-static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-			unsigned long nr_segs, loff_t pos)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-			unsigned long nr_segs, loff_t pos)
-{
-	return -EIO;
-}
-
-static int bad_file_readdir(struct file *file, struct dir_context *ctx)
-{
-	return -EIO;
-}
-
-static unsigned int bad_file_poll(struct file *filp, poll_table *wait)
-{
-	return POLLERR;
-}
-
-static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd,
-			unsigned long arg)
-{
-	return -EIO;
-}
-
-static long bad_file_compat_ioctl(struct file *file, unsigned int cmd,
-			unsigned long arg)
-{
-	return -EIO;
-}
-
-static int bad_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	return -EIO;
-}
-
 static int bad_file_open(struct inode *inode, struct file *filp)
 {
 	return -EIO;
 }
 
-static int bad_file_flush(struct file *file, fl_owner_t id)
-{
-	return -EIO;
-}
-
-static int bad_file_release(struct inode *inode, struct file *filp)
-{
-	return -EIO;
-}
-
-static int bad_file_fsync(struct file *file, loff_t start, loff_t end,
-			  int datasync)
-{
-	return -EIO;
-}
-
-static int bad_file_aio_fsync(struct kiocb *iocb, int datasync)
-{
-	return -EIO;
-}
-
-static int bad_file_fasync(int fd, struct file *filp, int on)
-{
-	return -EIO;
-}
-
-static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_sendpage(struct file *file, struct page *page,
-			int off, size_t len, loff_t *pos, int more)
-{
-	return -EIO;
-}
-
-static unsigned long bad_file_get_unmapped_area(struct file *file,
-				unsigned long addr, unsigned long len,
-				unsigned long pgoff, unsigned long flags)
-{
-	return -EIO;
-}
-
-static int bad_file_check_flags(int flags)
-{
-	return -EIO;
-}
-
-static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe,
-			struct file *out, loff_t *ppos, size_t len,
-			unsigned int flags)
-{
-	return -EIO;
-}
-
-static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos,
-			struct pipe_inode_info *pipe, size_t len,
-			unsigned int flags)
-{
-	return -EIO;
-}
-
 static const struct file_operations bad_file_ops =
 {
-	.llseek		= bad_file_llseek,
-	.read		= bad_file_read,
-	.write		= bad_file_write,
-	.aio_read	= bad_file_aio_read,
-	.aio_write	= bad_file_aio_write,
-	.iterate	= bad_file_readdir,
-	.poll		= bad_file_poll,
-	.unlocked_ioctl	= bad_file_unlocked_ioctl,
-	.compat_ioctl	= bad_file_compat_ioctl,
-	.mmap		= bad_file_mmap,
 	.open		= bad_file_open,
-	.flush		= bad_file_flush,
-	.release	= bad_file_release,
-	.fsync		= bad_file_fsync,
-	.aio_fsync	= bad_file_aio_fsync,
-	.fasync		= bad_file_fasync,
-	.lock		= bad_file_lock,
-	.sendpage	= bad_file_sendpage,
-	.get_unmapped_area = bad_file_get_unmapped_area,
-	.check_flags	= bad_file_check_flags,
-	.flock		= bad_file_flock,
-	.splice_write	= bad_file_splice_write,
-	.splice_read	= bad_file_splice_read,
 };
 
 static int bad_inode_create (struct inode *dir, struct dentry *dentry,
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 02b1691..995986b 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -645,11 +645,12 @@
 
 static unsigned long randomize_stack_top(unsigned long stack_top)
 {
-	unsigned int random_variable = 0;
+	unsigned long random_variable = 0;
 
 	if ((current->flags & PF_RANDOMIZE) &&
 		!(current->personality & ADDR_NO_RANDOMIZE)) {
-		random_variable = get_random_int() & STACK_RND_MASK;
+		random_variable = (unsigned long) get_random_int();
+		random_variable &= STACK_RND_MASK;
 		random_variable <<= PAGE_SHIFT;
 	}
 #ifdef CONFIG_STACK_GROWSUP
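
The randomize_stack_top() fix above matters on 64-bit configurations
where STACK_RND_MASK is wide enough that masking and then shifting by
PAGE_SHIFT no longer fits in 32 bits: with an unsigned int intermediate,
the high-order random bits are silently dropped. A small standalone
demonstration of the truncation follows; the mask, shift and random
value are chosen for illustration.

#include <stdio.h>

int main(void)
{
	const unsigned long mask = 0x3fffff;	/* e.g. a 22-bit stack mask */
	const unsigned int shift = 12;		/* a typical PAGE_SHIFT */
	unsigned long r = 0x2abcdeUL;		/* pretend random value */

	unsigned int  narrow = (unsigned int)r & mask;	/* old code's type */
	unsigned long wide   = r & mask;		/* fixed code's type */

	/* The 32-bit intermediate wraps once the shift pushes bits past
	 * bit 31, so the two results differ whenever the masked value has
	 * set bits at position (32 - shift) or higher. */
	printf("narrow: 0x%lx\n", (unsigned long)(narrow << shift));
	printf("wide  : 0x%lx\n", wide << shift);
	return 0;
}
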
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 9936421..6d67f32 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1645,14 +1645,14 @@
 
 	parent_nritems = btrfs_header_nritems(parent);
 	blocksize = root->nodesize;
-	end_slot = parent_nritems;
+	end_slot = parent_nritems - 1;
 
-	if (parent_nritems == 1)
+	if (parent_nritems <= 1)
 		return 0;
 
 	btrfs_set_lock_blocking(parent);
 
-	for (i = start_slot; i < end_slot; i++) {
+	for (i = start_slot; i <= end_slot; i++) {
 		int close = 1;
 
 		btrfs_node_key(parent, &disk_key, i);
@@ -1669,7 +1669,7 @@
 			other = btrfs_node_blockptr(parent, i - 1);
 			close = close_blocks(blocknr, other, blocksize);
 		}
-		if (!close && i < end_slot - 2) {
+		if (!close && i < end_slot) {
 			other = btrfs_node_blockptr(parent, i + 1);
 			close = close_blocks(blocknr, other, blocksize);
 		}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 571f402..6f08045 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3208,6 +3208,8 @@
 		return 0;
 	}
 
+	if (trans->aborted)
+		return 0;
 again:
 	inode = lookup_free_space_inode(root, block_group, path);
 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
@@ -3243,6 +3245,20 @@
 	 */
 	BTRFS_I(inode)->generation = 0;
 	ret = btrfs_update_inode(trans, root, inode);
+	if (ret) {
+		/*
+		 * So theoretically we could recover from this, simply set the
+		 * super cache generation to 0 so we know to invalidate the
+		 * cache, but then we'd have to keep track of the block groups
+		 * that fail this way so we know we _have_ to reset this cache
+		 * before the next commit or risk reading stale cache.  So to
+		 * limit our exposure to horrible edge cases lets just abort the
+		 * limit our exposure to horrible edge cases, let's just abort the
+		 * transaction; this only happens in really bad situations
+		 */
+		btrfs_abort_transaction(trans, root, ret);
+		goto out_put;
+	}
 	WARN_ON(ret);
 
 	if (i_size_read(inode) > 0) {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index b78bbba..30982bb 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1811,22 +1811,10 @@
 	mutex_unlock(&inode->i_mutex);
 
 	/*
-	 * we want to make sure fsync finds this change
-	 * but we haven't joined a transaction running right now.
-	 *
-	 * Later on, someone is sure to update the inode and get the
-	 * real transid recorded.
-	 *
-	 * We set last_trans now to the fs_info generation + 1,
-	 * this will either be one more than the running transaction
-	 * or the generation used for the next transaction if there isn't
-	 * one running right now.
-	 *
 	 * We also have to set last_sub_trans to the current log transid,
 	 * otherwise subsequent syncs to a file that's been synced in this
 	 * transaction will appear to have already occurred.
 	 */
-	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
 	BTRFS_I(inode)->last_sub_trans = root->log_transid;
 	if (num_written > 0) {
 		err = generic_write_sync(file, pos, num_written);
@@ -1959,25 +1947,37 @@
 	atomic_inc(&root->log_batch);
 
 	/*
-	 * check the transaction that last modified this inode
-	 * and see if its already been committed
-	 */
-	if (!BTRFS_I(inode)->last_trans) {
-		mutex_unlock(&inode->i_mutex);
-		goto out;
-	}
-
-	/*
-	 * if the last transaction that changed this file was before
-	 * the current transaction, we can bail out now without any
-	 * syncing
+	 * If the last transaction that changed this file was before the current
+	 * transaction and we have the full sync flag set in our inode, we can
+	 * bail out now without any syncing.
+	 *
+	 * Note that we can't bail out if the full sync flag isn't set. This is
+	 * because when the full sync flag is set we start all ordered extents
+	 * and wait for them to fully complete - when they complete they update
+	 * the inode's last_trans field through:
+	 *
+	 *     btrfs_finish_ordered_io() ->
+	 *         btrfs_update_inode_fallback() ->
+	 *             btrfs_update_inode() ->
+	 *                 btrfs_set_inode_last_trans()
+	 *
+	 * So we are sure that last_trans is up to date and can do this check to
+	 * bail out safely. For the fast path, when the full sync flag is not
+	 * set in our inode, we can not do it because we start only our ordered
+	 * extents and don't wait for them to complete (that is when
+	 * btrfs_finish_ordered_io runs), so here at this point their last_trans
+	 * value might be less than or equal to fs_info->last_trans_committed,
+	 * and setting a speculative last_trans for an inode when a buffered
+	 * write is made (such as fs_info->generation + 1 for example) would not
+	 * be reliable since after setting the value and before fsync is called
+	 * any number of transactions can start and commit (transaction kthread
+	 * commits the current transaction periodically), and a transaction
+	 * commit does not start nor waits for ordered extents to complete.
 	 */
 	smp_mb();
 	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
-	    BTRFS_I(inode)->last_trans <=
-	    root->fs_info->last_trans_committed) {
-		BTRFS_I(inode)->last_trans = 0;
-
+	    (full_sync && BTRFS_I(inode)->last_trans <=
+	     root->fs_info->last_trans_committed)) {
 		/*
 		 * We've had everything committed since the last time we were
 		 * modified so clear this flag in case it was set for whatever
@@ -2275,6 +2275,8 @@
 	bool same_page;
 	bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
 	u64 ino_size;
+	bool truncated_page = false;
+	bool updated_inode = false;
 
 	ret = btrfs_wait_ordered_range(inode, offset, len);
 	if (ret)
@@ -2306,13 +2308,18 @@
 	 * entire page.
 	 */
 	if (same_page && len < PAGE_CACHE_SIZE) {
-		if (offset < ino_size)
+		if (offset < ino_size) {
+			truncated_page = true;
 			ret = btrfs_truncate_page(inode, offset, len, 0);
+		} else {
+			ret = 0;
+		}
 		goto out_only_mutex;
 	}
 
 	/* zero back part of the first page */
 	if (offset < ino_size) {
+		truncated_page = true;
 		ret = btrfs_truncate_page(inode, offset, 0, 0);
 		if (ret) {
 			mutex_unlock(&inode->i_mutex);
@@ -2348,6 +2355,7 @@
 		if (!ret) {
 			/* zero the front end of the last page */
 			if (tail_start + tail_len < ino_size) {
+				truncated_page = true;
 				ret = btrfs_truncate_page(inode,
 						tail_start + tail_len, 0, 1);
 				if (ret)
@@ -2357,8 +2365,8 @@
 	}
 
 	if (lockend < lockstart) {
-		mutex_unlock(&inode->i_mutex);
-		return 0;
+		ret = 0;
+		goto out_only_mutex;
 	}
 
 	while (1) {
@@ -2506,6 +2514,7 @@
 
 	trans->block_rsv = &root->fs_info->trans_block_rsv;
 	ret = btrfs_update_inode(trans, root, inode);
+	updated_inode = true;
 	btrfs_end_transaction(trans, root);
 	btrfs_btree_balance_dirty(root);
 out_free:
@@ -2515,6 +2524,22 @@
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			     &cached_state, GFP_NOFS);
 out_only_mutex:
+	if (!updated_inode && truncated_page && !ret && !err) {
+		/*
+		 * If we only end up zeroing part of a page, we still need to
+		 * update the inode item, so that all the time fields are
+		 * updated as well as the necessary btrfs inode in memory fields
+		 * for detecting, at fsync time, if the inode isn't yet in the
+		 * log tree or it's there but not up to date.
+		 */
+		trans = btrfs_start_transaction(root, 1);
+		if (IS_ERR(trans)) {
+			err = PTR_ERR(trans);
+		} else {
+			err = btrfs_update_inode(trans, root, inode);
+			ret = btrfs_end_transaction(trans, root);
+		}
+	}
 	mutex_unlock(&inode->i_mutex);
 	if (ret && !err)
 		err = ret;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a85c23d..da828cf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7285,7 +7285,6 @@
 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
 	     em->block_start != EXTENT_MAP_HOLE)) {
 		int type;
-		int ret;
 		u64 block_start, orig_start, orig_block_len, ram_bytes;
 
 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d49fe8a..74609b9 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -776,11 +776,11 @@
 	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
 		return -EPERM;
 	if (isdir) {
-		if (!S_ISDIR(victim->d_inode->i_mode))
+		if (!d_is_dir(victim))
 			return -ENOTDIR;
 		if (IS_ROOT(victim))
 			return -EBUSY;
-	} else if (S_ISDIR(victim->d_inode->i_mode))
+	} else if (d_is_dir(victim))
 		return -EISDIR;
 	if (IS_DEADDIR(dir))
 		return -ENOENT;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 534544e..157cc54 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -452,9 +452,7 @@
 			continue;
 		if (entry_end(ordered) <= start)
 			break;
-		if (!list_empty(&ordered->log_list))
-			continue;
-		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
+		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
 			continue;
 		list_add(&ordered->log_list, logged_list);
 		atomic_inc(&ordered->refs);
@@ -511,8 +509,7 @@
 		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
 						   &ordered->flags));
 
-		if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
-			list_add_tail(&ordered->trans_list, &trans->ordered);
+		list_add_tail(&ordered->trans_list, &trans->ordered);
 		spin_lock_irq(&log->log_extents_lock[index]);
 	}
 	spin_unlock_irq(&log->log_extents_lock[index]);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index fe58572..d6033f5 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -230,6 +230,7 @@
 	u64 parent_ino;
 	u64 ino;
 	u64 gen;
+	bool is_orphan;
 	struct list_head update_refs;
 };
 
@@ -2984,7 +2985,8 @@
 				u64 ino_gen,
 				u64 parent_ino,
 				struct list_head *new_refs,
-				struct list_head *deleted_refs)
+				struct list_head *deleted_refs,
+				const bool is_orphan)
 {
 	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
 	struct rb_node *parent = NULL;
@@ -2999,6 +3001,7 @@
 	pm->parent_ino = parent_ino;
 	pm->ino = ino;
 	pm->gen = ino_gen;
+	pm->is_orphan = is_orphan;
 	INIT_LIST_HEAD(&pm->list);
 	INIT_LIST_HEAD(&pm->update_refs);
 	RB_CLEAR_NODE(&pm->node);
@@ -3131,16 +3134,20 @@
 	rmdir_ino = dm->rmdir_ino;
 	free_waiting_dir_move(sctx, dm);
 
-	ret = get_first_ref(sctx->parent_root, pm->ino,
-			    &parent_ino, &parent_gen, name);
-	if (ret < 0)
-		goto out;
-
-	ret = get_cur_path(sctx, parent_ino, parent_gen,
-			   from_path);
-	if (ret < 0)
-		goto out;
-	ret = fs_path_add_path(from_path, name);
+	if (pm->is_orphan) {
+		ret = gen_unique_name(sctx, pm->ino,
+				      pm->gen, from_path);
+	} else {
+		ret = get_first_ref(sctx->parent_root, pm->ino,
+				    &parent_ino, &parent_gen, name);
+		if (ret < 0)
+			goto out;
+		ret = get_cur_path(sctx, parent_ino, parent_gen,
+				   from_path);
+		if (ret < 0)
+			goto out;
+		ret = fs_path_add_path(from_path, name);
+	}
 	if (ret < 0)
 		goto out;
 
@@ -3150,7 +3157,8 @@
 		LIST_HEAD(deleted_refs);
 		ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
 		ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
-					   &pm->update_refs, &deleted_refs);
+					   &pm->update_refs, &deleted_refs,
+					   pm->is_orphan);
 		if (ret < 0)
 			goto out;
 		if (rmdir_ino) {
@@ -3283,6 +3291,127 @@
 	return ret;
 }
 
+/*
+ * We might need to delay a directory rename even when no ancestor directory
+ * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
+ * renamed. This happens when we rename a directory to the old name (the name
+ * in the parent root) of some other unrelated directory that got its rename
+ * delayed due to some ancestor with higher number that got renamed.
+ *
+ * Example:
+ *
+ * Parent snapshot:
+ * .                                       (ino 256)
+ * |---- a/                                (ino 257)
+ * |     |---- file                        (ino 260)
+ * |
+ * |---- b/                                (ino 258)
+ * |---- c/                                (ino 259)
+ *
+ * Send snapshot:
+ * .                                       (ino 256)
+ * |---- a/                                (ino 258)
+ * |---- x/                                (ino 259)
+ *       |---- y/                          (ino 257)
+ *             |----- file                 (ino 260)
+ *
+ * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
+ * from 'a' to 'x/y' happening first, which in turn depends on the rename of
+ * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
+ * must issue is:
+ *
+ * 1 - rename 259 from 'c' to 'x'
+ * 2 - rename 257 from 'a' to 'x/y'
+ * 3 - rename 258 from 'b' to 'a'
+ *
+ * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
+ * be done right away and < 0 on error.
+ */
+static int wait_for_dest_dir_move(struct send_ctx *sctx,
+				  struct recorded_ref *parent_ref,
+				  const bool is_orphan)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_key di_key;
+	struct btrfs_dir_item *di;
+	u64 left_gen;
+	u64 right_gen;
+	int ret = 0;
+
+	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
+		return 0;
+
+	path = alloc_path_for_send();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = parent_ref->dir;
+	key.type = BTRFS_DIR_ITEM_KEY;
+	key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
+
+	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
+	if (ret < 0) {
+		goto out;
+	} else if (ret > 0) {
+		ret = 0;
+		goto out;
+	}
+
+	di = btrfs_match_dir_item_name(sctx->parent_root, path,
+				       parent_ref->name, parent_ref->name_len);
+	if (!di) {
+		ret = 0;
+		goto out;
+	}
+	/*
+	 * di_key.objectid has the number of the inode that has a dentry in the
+	 * parent directory with the same name that sctx->cur_ino is being
+	 * renamed to. We need to check if that inode is in the send root as
+	 * well and if it is currently marked as an inode with a pending rename,
+	 * if it is, we need to delay the rename of sctx->cur_ino as well, so
+	 * that it happens after that other inode is renamed.
+	 */
+	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
+	if (di_key.type != BTRFS_INODE_ITEM_KEY) {
+		ret = 0;
+		goto out;
+	}
+
+	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
+			     &left_gen, NULL, NULL, NULL, NULL);
+	if (ret < 0)
+		goto out;
+	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
+			     &right_gen, NULL, NULL, NULL, NULL);
+	if (ret < 0) {
+		if (ret == -ENOENT)
+			ret = 0;
+		goto out;
+	}
+
+	/* Different inode, no need to delay the rename of sctx->cur_ino */
+	if (right_gen != left_gen) {
+		ret = 0;
+		goto out;
+	}
+
+	if (is_waiting_for_move(sctx, di_key.objectid)) {
+		ret = add_pending_dir_move(sctx,
+					   sctx->cur_ino,
+					   sctx->cur_inode_gen,
+					   di_key.objectid,
+					   &sctx->new_refs,
+					   &sctx->deleted_refs,
+					   is_orphan);
+		if (!ret)
+			ret = 1;
+	}
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
 static int wait_for_parent_move(struct send_ctx *sctx,
 				struct recorded_ref *parent_ref)
 {
@@ -3349,7 +3478,8 @@
 					   sctx->cur_inode_gen,
 					   ino,
 					   &sctx->new_refs,
-					   &sctx->deleted_refs);
+					   &sctx->deleted_refs,
+					   false);
 		if (!ret)
 			ret = 1;
 	}
@@ -3372,6 +3502,7 @@
 	int did_overwrite = 0;
 	int is_orphan = 0;
 	u64 last_dir_ino_rm = 0;
+	bool can_rename = true;
 
 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
 
@@ -3490,12 +3621,22 @@
 			}
 		}
 
+		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
+			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
+			if (ret < 0)
+				goto out;
+			if (ret == 1) {
+				can_rename = false;
+				*pending_move = 1;
+			}
+		}
+
 		/*
 		 * link/move the ref to the new place. If we have an orphan
 		 * inode, move it and update valid_path. If not, link or move
 		 * it depending on the inode mode.
 		 */
-		if (is_orphan) {
+		if (is_orphan && can_rename) {
 			ret = send_rename(sctx, valid_path, cur->full_path);
 			if (ret < 0)
 				goto out;
@@ -3503,7 +3644,7 @@
 			ret = fs_path_copy(valid_path, cur->full_path);
 			if (ret < 0)
 				goto out;
-		} else {
+		} else if (can_rename) {
 			if (S_ISDIR(sctx->cur_inode_mode)) {
 				/*
 				 * Dirs can't be linked, so move it. For moved
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 7e80f32..88e51ad 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1052,9 +1052,6 @@
 		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 		if (ret)
 			return ret;
-		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-		if (ret)
-			return ret;
 	}
 
 	return 0;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9a37f8b..c5b8ba3 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1012,7 +1012,7 @@
 		base = btrfs_item_ptr_offset(leaf, path->slots[0]);
 
 		while (cur_offset < item_size) {
-			extref = (struct btrfs_inode_extref *)base + cur_offset;
+			extref = (struct btrfs_inode_extref *)(base + cur_offset);
 
 			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
 
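
The one-character tree-log change above is a C precedence fix: a cast
binds tighter than +, so (struct btrfs_inode_extref *)base + cur_offset
advances by cur_offset * sizeof(struct btrfs_inode_extref) instead of
cur_offset bytes. A minimal standalone illustration with a made-up
struct:

#include <stdio.h>

struct item {			/* stand-in for btrfs_inode_extref */
	unsigned long a, b;
};

int main(void)
{
	static struct item storage[16];		/* aligned scratch buffer */
	char *buf = (char *)storage;
	unsigned long base = (unsigned long)buf;
	unsigned long off = 8;			/* intended as a byte offset */

	/* Cast first, then add: the + scales by sizeof(struct item). */
	struct item *wrong = (struct item *)base + off;
	/* Add the byte offset first, then cast: what the code means. */
	struct item *right = (struct item *)(base + off);

	printf("wrong advances %zu bytes, right advances %zu bytes\n",
	       (size_t)((char *)wrong - buf), (size_t)((char *)right - buf));
	return 0;
}
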
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index cd4d131..8222f6f 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4903,10 +4903,17 @@
 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
 {
 	struct btrfs_bio *bbio = kzalloc(
+		 /* the size of the btrfs_bio */
 		sizeof(struct btrfs_bio) +
+		/* plus the variable array for the stripes */
 		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
+		/* plus the variable array for the tgt dev */
 		sizeof(int) * (real_stripes) +
-		sizeof(u64) * (real_stripes),
+		/*
+		 * plus the raid_map, which includes both the tgt dev
+		 * and the stripes
+		 */
+		sizeof(u64) * (total_stripes),
 		GFP_NOFS);
 	if (!bbio)
 		return NULL;
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 47b1946..883b936 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -111,6 +111,8 @@
 					name, name_len, -1);
 		if (!di && (flags & XATTR_REPLACE))
 			ret = -ENODATA;
+		else if (IS_ERR(di))
+			ret = PTR_ERR(di);
 		else if (di)
 			ret = btrfs_delete_one_dir_name(trans, root, path, di);
 		goto out;
@@ -127,10 +129,12 @@
 		ASSERT(mutex_is_locked(&inode->i_mutex));
 		di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
 					name, name_len, 0);
-		if (!di) {
+		if (!di)
 			ret = -ENODATA;
+		else if (IS_ERR(di))
+			ret = PTR_ERR(di);
+		if (ret)
 			goto out;
-		}
 		btrfs_release_path(path);
 		di = NULL;
 	}
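
The xattr fix above handles the third possible outcome of
btrfs_lookup_xattr(): besides a valid pointer and NULL ("not found") it
can return an errno encoded in the pointer itself, which must be checked
with IS_ERR() and unpacked with PTR_ERR(). A simplified userspace
re-implementation of that idiom, only to show the encoding; this is not
the kernel's err.h.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* A lookup that can return a valid object, NULL ("not found"),
 * or an encoded error such as -EIO. */
static void *lookup(int mode)
{
	static int object = 42;

	switch (mode) {
	case 0: return &object;
	case 1: return NULL;
	default: return ERR_PTR(-EIO);
	}
}

int main(void)
{
	for (int mode = 0; mode < 3; mode++) {
		void *p = lookup(mode);

		if (!p)
			printf("mode %d: not found\n", mode);
		else if (IS_ERR(p))
			printf("mode %d: error %ld\n", mode, PTR_ERR(p));
		else
			printf("mode %d: found %d\n", mode, *(int *)p);
	}
	return 0;
}
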
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index ce1b115..f601def 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -574,7 +574,7 @@
 	/* extract the directory dentry from the cwd */
 	get_fs_pwd(current->fs, &path);
 
-	if (!S_ISDIR(path.dentry->d_inode->i_mode))
+	if (!d_can_lookup(path.dentry))
 		goto notdir;
 
 	cachefiles_begin_secure(cache, &saved_cred);
@@ -646,7 +646,7 @@
 	/* extract the directory dentry from the cwd */
 	get_fs_pwd(current->fs, &path);
 
-	if (!S_ISDIR(path.dentry->d_inode->i_mode))
+	if (!d_can_lookup(path.dentry))
 		goto notdir;
 
 	cachefiles_begin_secure(cache, &saved_cred);
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 1c7293c..2324262 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -437,7 +437,7 @@
 	if (!object->backer)
 		return -ENOBUFS;
 
-	ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+	ASSERT(d_is_reg(object->backer));
 
 	fscache_set_store_limit(&object->fscache, ni_size);
 
@@ -501,7 +501,7 @@
 	       op->object->debug_id, (unsigned long long)ni_size);
 
 	if (object->backer) {
-		ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+		ASSERT(d_is_reg(object->backer));
 
 		fscache_set_store_limit(&object->fscache, ni_size);
 
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 7f8e83f..1e51714e 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -277,7 +277,7 @@
 	_debug("remove %p from %p", rep, dir);
 
 	/* non-directories can just be unlinked */
-	if (!S_ISDIR(rep->d_inode->i_mode)) {
+	if (!d_is_dir(rep)) {
 		_debug("unlink stale object");
 
 		path.mnt = cache->mnt;
@@ -323,7 +323,7 @@
 		return 0;
 	}
 
-	if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) {
+	if (!d_can_lookup(cache->graveyard)) {
 		unlock_rename(cache->graveyard, dir);
 		cachefiles_io_error(cache, "Graveyard no longer a directory");
 		return -EIO;
@@ -475,7 +475,7 @@
 	ASSERT(parent->dentry);
 	ASSERT(parent->dentry->d_inode);
 
-	if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) {
+	if (!(d_is_dir(parent->dentry))) {
 		// TODO: convert file to dir
 		_leave("looking up in none directory");
 		return -ENOBUFS;
@@ -539,7 +539,7 @@
 			_debug("mkdir -> %p{%p{ino=%lu}}",
 			       next, next->d_inode, next->d_inode->i_ino);
 
-		} else if (!S_ISDIR(next->d_inode->i_mode)) {
+		} else if (!d_can_lookup(next)) {
 			pr_err("inode %lu is not a directory\n",
 			       next->d_inode->i_ino);
 			ret = -ENOBUFS;
@@ -568,8 +568,8 @@
 			_debug("create -> %p{%p{ino=%lu}}",
 			       next, next->d_inode, next->d_inode->i_ino);
 
-		} else if (!S_ISDIR(next->d_inode->i_mode) &&
-			   !S_ISREG(next->d_inode->i_mode)
+		} else if (!d_can_lookup(next) &&
+			   !d_is_reg(next)
 			   ) {
 			pr_err("inode %lu is not a file or directory\n",
 			       next->d_inode->i_ino);
@@ -642,7 +642,7 @@
 
 	/* open a file interface onto a data file */
 	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
-		if (S_ISREG(object->dentry->d_inode->i_mode)) {
+		if (d_is_reg(object->dentry)) {
 			const struct address_space_operations *aops;
 
 			ret = -EPERM;
@@ -763,7 +763,7 @@
 	/* we need to make sure the subdir is a directory */
 	ASSERT(subdir->d_inode);
 
-	if (!S_ISDIR(subdir->d_inode->i_mode)) {
+	if (!d_can_lookup(subdir)) {
 		pr_err("%s is not a directory\n", dirname);
 		ret = -EIO;
 		goto check_error;
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 616db0e7..c6cd8d7 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -900,7 +900,7 @@
 		return -ENOBUFS;
 	}
 
-	ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+	ASSERT(d_is_reg(object->backer));
 
 	cache = container_of(object->fscache.cache,
 			     struct cachefiles_cache, cache);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 0411dbb..83e9976 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -904,7 +904,7 @@
 	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
 		dout("unlink/rmdir dir %p dn %p inode %p\n",
 		     dir, dentry, inode);
-		op = S_ISDIR(dentry->d_inode->i_mode) ?
+		op = d_is_dir(dentry) ?
 			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
 	} else
 		goto out;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index a3d774b..d533075 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -292,7 +292,7 @@
 	}
 	if (err)
 		goto out_req;
-	if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
+	if (dn || dentry->d_inode == NULL || d_is_symlink(dentry)) {
 		/* make vfs retry on splice, ENOENT, or symlink */
 		dout("atomic_open finish_no_open on dn %p\n", dn);
 		err = finish_no_open(file, dn);
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 281ee01..60cb88c 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -304,7 +304,7 @@
 			     (const char *) old_name, (const char *)new_name);
 	if (!error) {
 		if (new_dentry->d_inode) {
-			if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+			if (d_is_dir(new_dentry)) {
 				coda_dir_drop_nlink(old_dir);
 				coda_dir_inc_nlink(new_dir);
 			}
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index a315677..b65d1ef 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -69,14 +69,13 @@
 extern int configfs_is_root(struct config_item *item);
 
 extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *);
-extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *));
+extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct inode *));
 
 extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
 extern int configfs_make_dirent(struct configfs_dirent *,
 				struct dentry *, void *, umode_t, int);
 extern int configfs_dirent_is_ready(struct configfs_dirent *);
 
-extern int configfs_add_file(struct dentry *, const struct configfs_attribute *, int);
 extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
 
 extern const unsigned char * configfs_get_name(struct configfs_dirent *sd);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index c9c298b..cf0db00 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -240,60 +240,26 @@
 	return 0;
 }
 
-static int init_dir(struct inode * inode)
+static void init_dir(struct inode * inode)
 {
 	inode->i_op = &configfs_dir_inode_operations;
 	inode->i_fop = &configfs_dir_operations;
 
 	/* directory inodes start off with i_nlink == 2 (for "." entry) */
 	inc_nlink(inode);
-	return 0;
 }
 
-static int configfs_init_file(struct inode * inode)
+static void configfs_init_file(struct inode * inode)
 {
 	inode->i_size = PAGE_SIZE;
 	inode->i_fop = &configfs_file_operations;
-	return 0;
 }
 
-static int init_symlink(struct inode * inode)
+static void init_symlink(struct inode * inode)
 {
 	inode->i_op = &configfs_symlink_inode_operations;
-	return 0;
 }
 
-static int create_dir(struct config_item *k, struct dentry *d)
-{
-	int error;
-	umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
-	struct dentry *p = d->d_parent;
-
-	BUG_ON(!k);
-
-	error = configfs_dirent_exists(p->d_fsdata, d->d_name.name);
-	if (!error)
-		error = configfs_make_dirent(p->d_fsdata, d, k, mode,
-					     CONFIGFS_DIR | CONFIGFS_USET_CREATING);
-	if (!error) {
-		configfs_set_dir_dirent_depth(p->d_fsdata, d->d_fsdata);
-		error = configfs_create(d, mode, init_dir);
-		if (!error) {
-			inc_nlink(p->d_inode);
-		} else {
-			struct configfs_dirent *sd = d->d_fsdata;
-			if (sd) {
-				spin_lock(&configfs_dirent_lock);
-				list_del_init(&sd->s_sibling);
-				spin_unlock(&configfs_dirent_lock);
-				configfs_put(sd);
-			}
-		}
-	}
-	return error;
-}
-
-
 /**
  *	configfs_create_dir - create a directory for a config_item.
  *	@item:		config_item we're creating the directory for.
@@ -303,11 +269,37 @@
  *	until it is validated by configfs_dir_set_ready()
  */
 
-static int configfs_create_dir(struct config_item * item, struct dentry *dentry)
+static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
 {
-	int error = create_dir(item, dentry);
-	if (!error)
+	int error;
+	umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
+	struct dentry *p = dentry->d_parent;
+
+	BUG_ON(!item);
+
+	error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name);
+	if (unlikely(error))
+		return error;
+
+	error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
+				     CONFIGFS_DIR | CONFIGFS_USET_CREATING);
+	if (unlikely(error))
+		return error;
+
+	configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
+	error = configfs_create(dentry, mode, init_dir);
+	if (!error) {
+		inc_nlink(p->d_inode);
 		item->ci_dentry = dentry;
+	} else {
+		struct configfs_dirent *sd = dentry->d_fsdata;
+		if (sd) {
+			spin_lock(&configfs_dirent_lock);
+			list_del_init(&sd->s_sibling);
+			spin_unlock(&configfs_dirent_lock);
+			configfs_put(sd);
+		}
+	}
 	return error;
 }
 
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 1d1c41f..56d2cdc 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -313,21 +313,6 @@
 	.release	= configfs_release,
 };
 
-
-int configfs_add_file(struct dentry * dir, const struct configfs_attribute * attr, int type)
-{
-	struct configfs_dirent * parent_sd = dir->d_fsdata;
-	umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
-	int error = 0;
-
-	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
-	error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
-	mutex_unlock(&dir->d_inode->i_mutex);
-
-	return error;
-}
-
-
 /**
  *	configfs_create_file - create an attribute file for an item.
  *	@item:	item we're creating for.
@@ -336,9 +321,16 @@
 
 int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr)
 {
-	BUG_ON(!item || !item->ci_dentry || !attr);
+	struct dentry *dir = item->ci_dentry;
+	struct configfs_dirent *parent_sd = dir->d_fsdata;
+	umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
+	int error = 0;
 
-	return configfs_add_file(item->ci_dentry, attr,
-				 CONFIGFS_ITEM_ATTR);
+	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
+	error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
+				     CONFIGFS_ITEM_ATTR);
+	mutex_unlock(&dir->d_inode->i_mutex);
+
+	return error;
 }
 
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 65af861..5423a6a 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -176,7 +176,7 @@
 
 #endif /* CONFIG_LOCKDEP */
 
-int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *))
+int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct inode *))
 {
 	int error = 0;
 	struct inode *inode = NULL;
@@ -198,13 +198,7 @@
 	p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
 	configfs_set_inode_lock_class(sd, inode);
 
-	if (init) {
-		error = init(inode);
-		if (error) {
-			iput(inode);
-			return error;
-		}
-	}
+	init(inode);
 	d_instantiate(dentry, inode);
 	if (S_ISDIR(mode) || S_ISLNK(mode))
 		dget(dentry);  /* pin link and directory dentries in core */
@@ -242,7 +236,7 @@
 
 	if (dentry) {
 		spin_lock(&dentry->d_lock);
-		if (!(d_unhashed(dentry) && dentry->d_inode)) {
+		if (!d_unhashed(dentry) && dentry->d_inode) {
 			dget_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
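
The configfs hunks above narrow the inode init callback from int (*init)(struct inode *) to void (*init)(struct inode *): none of the initializers can fail, so configfs_create() no longer needs an error unwind around the call. A minimal userspace sketch of the same refactoring, with invented names, assuming the callback really cannot fail:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
    	const char *name;
    	int is_dir;
    };

    /* Initializers that cannot fail are declared void, so callers need no error path. */
    static void init_dir_node(struct node *n)  { n->is_dir = 1; }
    static void init_file_node(struct node *n) { n->is_dir = 0; }

    static struct node *node_create(const char *name, void (*init)(struct node *))
    {
    	struct node *n = malloc(sizeof(*n));

    	if (!n)
    		return NULL;
    	n->name = name;
    	init(n);	/* no return value to check, no unwind to run */
    	return n;
    }

    int main(void)
    {
    	struct node *d = node_create("config", init_dir_node);
    	struct node *f = node_create("attr", init_file_node);

    	if (d && f)
    		printf("%s dir=%d, %s dir=%d\n", d->name, d->is_dir, f->name, f->is_dir);
    	free(d);
    	free(f);
    	return 0;
    }
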
diff --git a/fs/coredump.c b/fs/coredump.c
index b5c86ff..f319926 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -572,7 +572,7 @@
 			 *
 			 * Normally core limits are irrelevant to pipes, since
 			 * we're not writing to the file system, but we use
-			 * cprm.limit of 1 here as a speacial value, this is a
+			 * cprm.limit of 1 here as a special value, this is a
 			 * consistent way to catch recursive crashes.
 			 * We can still crash if the core_pattern binary sets
 			 * RLIM_CORE = !1, but it runs as root, and can do
diff --git a/fs/dcache.c b/fs/dcache.c
index dc400fd..c71e373 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1659,9 +1659,25 @@
 }
 EXPORT_SYMBOL(d_set_d_op);
 
+
+/*
+ * d_set_fallthru - Mark a dentry as falling through to a lower layer
+ * @dentry - The dentry to mark
+ *
+ * Mark a dentry as falling through to the lower layer (as set with
+ * d_pin_lower()).  This flag may be recorded on the medium.
+ */
+void d_set_fallthru(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags |= DCACHE_FALLTHRU;
+	spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(d_set_fallthru);
+
 static unsigned d_flags_for_inode(struct inode *inode)
 {
-	unsigned add_flags = DCACHE_FILE_TYPE;
+	unsigned add_flags = DCACHE_REGULAR_TYPE;
 
 	if (!inode)
 		return DCACHE_MISS_TYPE;
@@ -1674,13 +1690,21 @@
 			else
 				inode->i_opflags |= IOP_LOOKUP;
 		}
-	} else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
-		if (unlikely(inode->i_op->follow_link))
-			add_flags = DCACHE_SYMLINK_TYPE;
-		else
-			inode->i_opflags |= IOP_NOFOLLOW;
+		goto type_determined;
 	}
 
+	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
+		if (unlikely(inode->i_op->follow_link)) {
+			add_flags = DCACHE_SYMLINK_TYPE;
+			goto type_determined;
+		}
+		inode->i_opflags |= IOP_NOFOLLOW;
+	}
+
+	if (unlikely(!S_ISREG(inode->i_mode)))
+		add_flags = DCACHE_SPECIAL_TYPE;
+
+type_determined:
 	if (unlikely(IS_AUTOMOUNT(inode)))
 		add_flags |= DCACHE_NEED_AUTOMOUNT;
 	return add_flags;
@@ -1691,7 +1715,8 @@
 	unsigned add_flags = d_flags_for_inode(inode);
 
 	spin_lock(&dentry->d_lock);
-	__d_set_type(dentry, add_flags);
+	dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+	dentry->d_flags |= add_flags;
 	if (inode)
 		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
 	dentry->d_inode = inode;
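
d_flags_for_inode() above now classifies every inode into exactly one cached dentry type (miss, directory, symlink, special, or regular) before OR-ing in the automount bit, which is what lets later hunks in this series swap S_ISDIR(dentry->d_inode->i_mode) checks for d_is_dir(dentry). A simplified userspace model of that classification order; the enum and helpers are illustrative, not the kernel's:

    #include <stdio.h>
    #include <sys/stat.h>

    enum dtype { D_MISS, D_DIR, D_SYMLINK, D_SPECIAL, D_REGULAR };

    /* Classify once, in the same order as d_flags_for_inode(): missing inode,
     * then directory, then symlink, then "special" (device/fifo/socket),
     * and regular file as the default. */
    static enum dtype classify(const struct stat *st)
    {
    	if (!st)
    		return D_MISS;
    	if (S_ISDIR(st->st_mode))
    		return D_DIR;
    	if (S_ISLNK(st->st_mode))
    		return D_SYMLINK;
    	if (!S_ISREG(st->st_mode))
    		return D_SPECIAL;
    	return D_REGULAR;
    }

    /* Cheap checks against the cached type, analogous to d_is_dir()/d_is_reg(). */
    static int is_dir(enum dtype t) { return t == D_DIR; }

    int main(void)
    {
    	struct stat st;

    	if (lstat("/tmp", &st) == 0) {
    		enum dtype t = classify(&st);

    		printf("/tmp: type %d, is_dir=%d\n", t, is_dir(t));
    	}
    	return 0;
    }
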
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 45b18a5..96400ab 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -169,10 +169,19 @@
 	return 0;
 }
 
+static void debugfs_evict_inode(struct inode *inode)
+{
+	truncate_inode_pages_final(&inode->i_data);
+	clear_inode(inode);
+	if (S_ISLNK(inode->i_mode))
+		kfree(inode->i_private);
+}
+
 static const struct super_operations debugfs_super_operations = {
 	.statfs		= simple_statfs,
 	.remount_fs	= debugfs_remount,
 	.show_options	= debugfs_show_options,
+	.evict_inode	= debugfs_evict_inode,
 };
 
 static struct vfsmount *debugfs_automount(struct path *path)
@@ -511,23 +520,14 @@
 	int ret = 0;
 
 	if (debugfs_positive(dentry)) {
-		if (dentry->d_inode) {
-			dget(dentry);
-			switch (dentry->d_inode->i_mode & S_IFMT) {
-			case S_IFDIR:
-				ret = simple_rmdir(parent->d_inode, dentry);
-				break;
-			case S_IFLNK:
-				kfree(dentry->d_inode->i_private);
-				/* fall through */
-			default:
-				simple_unlink(parent->d_inode, dentry);
-				break;
-			}
-			if (!ret)
-				d_delete(dentry);
-			dput(dentry);
-		}
+		dget(dentry);
+		if (S_ISDIR(dentry->d_inode->i_mode))
+			ret = simple_rmdir(parent->d_inode, dentry);
+		else
+			simple_unlink(parent->d_inode, dentry);
+		if (!ret)
+			d_delete(dentry);
+		dput(dentry);
 	}
 	return ret;
 }
@@ -690,7 +690,7 @@
 	}
 	d_move(old_dentry, dentry);
 	fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name,
-		S_ISDIR(old_dentry->d_inode->i_mode),
+		d_is_dir(old_dentry),
 		NULL, old_dentry);
 	fsnotify_oldname_free(old_name);
 	unlock_rename(new_dir, old_dir);
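
The debugfs hunks above move the symlink-target kfree() into a new evict_inode super operation, so the payload is released whenever the inode is finally evicted instead of only on the explicit remove path, and the remove helper collapses to a plain rmdir-or-unlink choice. A small sketch of keeping the per-type free in a single teardown hook; the types and names are invented for the example:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct vnode {
    	int is_symlink;
    	char *private;		/* symlink target, owned by the node */
    };

    /* Single teardown hook: every removal path ends up here, so the
     * type-specific free lives in exactly one place. */
    static void vnode_evict(struct vnode *n)
    {
    	if (n->is_symlink)
    		free(n->private);
    	free(n);
    }

    static struct vnode *make_symlink(const char *target)
    {
    	struct vnode *n = calloc(1, sizeof(*n));

    	if (!n)
    		return NULL;
    	n->is_symlink = 1;
    	n->private = strdup(target);
    	return n;
    }

    int main(void)
    {
    	struct vnode *n = make_symlink("/somewhere/else");

    	if (n)
    		vnode_evict(n);	/* unlink/rmdir callers no longer special-case symlinks */
    	printf("done\n");
    	return 0;
    }
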
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 90d1882..5ba029e 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -124,7 +124,7 @@
 }
 
 #define ECRYPTFS_MAX_KEYSET_SIZE 1024
-#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 32
+#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 31
 #define ECRYPTFS_MAX_NUM_ENC_KEYS 64
 #define ECRYPTFS_MAX_IV_BYTES 16	/* 128 bits */
 #define ECRYPTFS_SALT_BYTES 2
@@ -237,7 +237,7 @@
 	struct crypto_ablkcipher *tfm;
 	struct crypto_hash *hash_tfm; /* Crypto context for generating
 				       * the initialization vectors */
-	unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
+	unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
 	unsigned char key[ECRYPTFS_MAX_KEY_BYTES];
 	unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES];
 	struct list_head keysig_list;
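
The eCryptfs hunks above redefine ECRYPTFS_MAX_CIPHER_NAME_SIZE as the number of name characters (31) and size every buffer as SIZE + 1, so the trailing NUL always fits; the BUG_ON in main.c below is relaxed from >= to > to match. A standalone illustration of that sizing convention, with example-only macro names:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_NAME_LEN 31			/* characters, excluding the NUL */

    struct crypt_ctx {
    	char cipher[MAX_NAME_LEN + 1];	/* +1 leaves room for the terminator */
    };

    static int set_cipher(struct crypt_ctx *ctx, const char *name)
    {
    	if (strlen(name) > MAX_NAME_LEN)	/* '>' not '>=': a 31-char name still fits */
    		return -1;
    	strcpy(ctx->cipher, name);
    	return 0;
    }

    int main(void)
    {
    	struct crypt_ctx ctx;

    	assert(set_cipher(&ctx, "aes") == 0);
    	printf("cipher=%s\n", ctx.cipher);
    	return 0;
    }
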
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 6f4e659..fd39bad 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -230,7 +230,7 @@
 	}
 	ecryptfs_set_file_lower(
 		file, ecryptfs_inode_to_private(inode)->lower_file);
-	if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
+	if (d_is_dir(ecryptfs_dentry)) {
 		ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
 		mutex_lock(&crypt_stat->cs_mutex);
 		crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
@@ -303,9 +303,22 @@
 	struct file *lower_file = ecryptfs_file_to_lower(file);
 	long rc = -ENOTTY;
 
-	if (lower_file->f_op->unlocked_ioctl)
+	if (!lower_file->f_op->unlocked_ioctl)
+		return rc;
+
+	switch (cmd) {
+	case FITRIM:
+	case FS_IOC_GETFLAGS:
+	case FS_IOC_SETFLAGS:
+	case FS_IOC_GETVERSION:
+	case FS_IOC_SETVERSION:
 		rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
-	return rc;
+		fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
+
+		return rc;
+	default:
+		return rc;
+	}
 }
 
 #ifdef CONFIG_COMPAT
@@ -315,9 +328,22 @@
 	struct file *lower_file = ecryptfs_file_to_lower(file);
 	long rc = -ENOIOCTLCMD;
 
-	if (lower_file->f_op->compat_ioctl)
+	if (!lower_file->f_op->compat_ioctl)
+		return rc;
+
+	switch (cmd) {
+	case FITRIM:
+	case FS_IOC32_GETFLAGS:
+	case FS_IOC32_SETFLAGS:
+	case FS_IOC32_GETVERSION:
+	case FS_IOC32_SETVERSION:
 		rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
-	return rc;
+		fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
+
+		return rc;
+	default:
+		return rc;
+	}
 }
 #endif
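
Rather than forwarding every ioctl to the lower file, the two hunks above pass through only a known-safe set (FITRIM plus the flags/version calls) and then copy the lower inode's attributes back up. A userspace-style sketch of the same whitelist-and-propagate shape; the command names and helpers are placeholders:

    #include <stdio.h>

    enum cmd { CMD_GETFLAGS, CMD_SETFLAGS, CMD_TRIM, CMD_PRIVATE };

    static long lower_ioctl(enum cmd cmd, long arg) { (void)cmd; (void)arg; return 0; }
    static void copy_attrs_up(void) { }	/* stand-in for the attribute resync */

    static long upper_ioctl(enum cmd cmd, long arg)
    {
    	long rc = -25;			/* stand-in for -ENOTTY */

    	switch (cmd) {
    	case CMD_GETFLAGS:
    	case CMD_SETFLAGS:
    	case CMD_TRIM:
    		/* Only whitelisted commands reach the lower layer... */
    		rc = lower_ioctl(cmd, arg);
    		/* ...and whatever they changed is reflected back upward. */
    		copy_attrs_up();
    		return rc;
    	default:
    		return rc;		/* everything else is rejected */
    	}
    }

    int main(void)
    {
    	printf("trim: %ld, private: %ld\n",
    	       upper_ioctl(CMD_TRIM, 0), upper_ioctl(CMD_PRIVATE, 0));
    	return 0;
    }
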
 
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 34b36a5..b08b518 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -907,9 +907,9 @@
 	lower_inode = ecryptfs_inode_to_lower(inode);
 	lower_dentry = ecryptfs_dentry_to_lower(dentry);
 	mutex_lock(&crypt_stat->cs_mutex);
-	if (S_ISDIR(dentry->d_inode->i_mode))
+	if (d_is_dir(dentry))
 		crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
-	else if (S_ISREG(dentry->d_inode->i_mode)
+	else if (d_is_reg(dentry)
 		 && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
 		     || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) {
 		struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 917bd5c..6bd67e2 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -891,7 +891,7 @@
 	struct blkcipher_desc desc;
 	char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
 	char iv[ECRYPTFS_MAX_IV_BYTES];
-	char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
+	char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
 };
 
 /**
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 1895d60..c095d32 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -407,7 +407,7 @@
 	if (!cipher_name_set) {
 		int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER);
 
-		BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE);
+		BUG_ON(cipher_name_len > ECRYPTFS_MAX_CIPHER_NAME_SIZE);
 		strcpy(mount_crypt_stat->global_default_cipher_name,
 		       ECRYPTFS_DEFAULT_CIPHER);
 	}
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index fdfd206..714cd37 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -429,7 +429,7 @@
 	if (IS_ERR(result))
 		return result;
 
-	if (S_ISDIR(result->d_inode->i_mode)) {
+	if (d_is_dir(result)) {
 		/*
 		 * This request is for a directory.
 		 *
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 982d934..f63c3d5 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -364,7 +364,8 @@
 #define EXT4_DIRTY_FL			0x00000100
 #define EXT4_COMPRBLK_FL		0x00000200 /* One or more compressed clusters */
 #define EXT4_NOCOMPR_FL			0x00000400 /* Don't compress */
-#define EXT4_ECOMPR_FL			0x00000800 /* Compression error */
+	/* nb: was previously EXT2_ECOMPR_FL */
+#define EXT4_ENCRYPT_FL			0x00000800 /* encrypted file */
 /* End compression flags --- maybe not all used */
 #define EXT4_INDEX_FL			0x00001000 /* hash-indexed directory */
 #define EXT4_IMAGIC_FL			0x00002000 /* AFS directory */
@@ -421,7 +422,7 @@
 	EXT4_INODE_DIRTY	= 8,
 	EXT4_INODE_COMPRBLK	= 9,	/* One or more compressed clusters */
 	EXT4_INODE_NOCOMPR	= 10,	/* Don't compress */
-	EXT4_INODE_ECOMPR	= 11,	/* Compression error */
+	EXT4_INODE_ENCRYPT	= 11,	/* Encrypted file */
 /* End compression flags --- maybe not all used */
 	EXT4_INODE_INDEX	= 12,	/* hash-indexed directory */
 	EXT4_INODE_IMAGIC	= 13,	/* AFS directory */
@@ -466,7 +467,7 @@
 	CHECK_FLAG_VALUE(DIRTY);
 	CHECK_FLAG_VALUE(COMPRBLK);
 	CHECK_FLAG_VALUE(NOCOMPR);
-	CHECK_FLAG_VALUE(ECOMPR);
+	CHECK_FLAG_VALUE(ENCRYPT);
 	CHECK_FLAG_VALUE(INDEX);
 	CHECK_FLAG_VALUE(IMAGIC);
 	CHECK_FLAG_VALUE(JOURNAL_DATA);
@@ -1048,6 +1049,12 @@
 /* Metadata checksum algorithm codes */
 #define EXT4_CRC32C_CHKSUM		1
 
+/* Encryption algorithms */
+#define EXT4_ENCRYPTION_MODE_INVALID		0
+#define EXT4_ENCRYPTION_MODE_AES_256_XTS	1
+#define EXT4_ENCRYPTION_MODE_AES_256_GCM	2
+#define EXT4_ENCRYPTION_MODE_AES_256_CBC	3
+
 /*
  * Structure of the super block
  */
@@ -1161,7 +1168,8 @@
 	__le32	s_grp_quota_inum;	/* inode for tracking group quota */
 	__le32	s_overhead_clusters;	/* overhead blocks/clusters in fs */
 	__le32	s_backup_bgs[2];	/* groups with sparse_super2 SBs */
-	__le32	s_reserved[106];	/* Padding to the end of the block */
+	__u8	s_encrypt_algos[4];	/* Encryption algorithms in use  */
+	__le32	s_reserved[105];	/* Padding to the end of the block */
 	__le32	s_checksum;		/* crc32c(superblock) */
 };
 
@@ -1527,6 +1535,7 @@
  * GDT_CSUM bits are mutually exclusive.
  */
 #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM	0x0400
+#define EXT4_FEATURE_RO_COMPAT_READONLY		0x1000
 
 #define EXT4_FEATURE_INCOMPAT_COMPRESSION	0x0001
 #define EXT4_FEATURE_INCOMPAT_FILETYPE		0x0002
@@ -1542,6 +1551,7 @@
 #define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM	0x2000 /* use crc32c for bg */
 #define EXT4_FEATURE_INCOMPAT_LARGEDIR		0x4000 /* >2GB or 3-lvl htree */
 #define EXT4_FEATURE_INCOMPAT_INLINE_DATA	0x8000 /* data in inode */
+#define EXT4_FEATURE_INCOMPAT_ENCRYPT		0x10000
 
 #define EXT2_FEATURE_COMPAT_SUPP	EXT4_FEATURE_COMPAT_EXT_ATTR
 #define EXT2_FEATURE_INCOMPAT_SUPP	(EXT4_FEATURE_INCOMPAT_FILETYPE| \
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 6b9878a..45fe924 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1401,10 +1401,7 @@
 				 * to free. Everything was covered by the start
 				 * of the range.
 				 */
-				return 0;
-			} else {
-				/* Shared branch grows from an indirect block */
-				partial2--;
+				goto do_indirects;
 			}
 		} else {
 			/*
@@ -1435,56 +1432,96 @@
 	/* Punch happened within the same level (n == n2) */
 	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
 	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
-	/*
-	 * ext4_find_shared returns Indirect structure which
-	 * points to the last element which should not be
-	 * removed by truncate. But this is end of the range
-	 * in punch_hole so we need to point to the next element
-	 */
-	partial2->p++;
-	while ((partial > chain) || (partial2 > chain2)) {
-		/* We're at the same block, so we're almost finished */
-		if ((partial->bh && partial2->bh) &&
-		    (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
-			if ((partial > chain) && (partial2 > chain2)) {
-				ext4_free_branches(handle, inode, partial->bh,
-						   partial->p + 1,
-						   partial2->p,
-						   (chain+n-1) - partial);
-				BUFFER_TRACE(partial->bh, "call brelse");
-				brelse(partial->bh);
-				BUFFER_TRACE(partial2->bh, "call brelse");
-				brelse(partial2->bh);
+
+	/* Free top, but only if partial2 isn't its subtree. */
+	if (nr) {
+		int level = min(partial - chain, partial2 - chain2);
+		int i;
+		int subtree = 1;
+
+		for (i = 0; i <= level; i++) {
+			if (offsets[i] != offsets2[i]) {
+				subtree = 0;
+				break;
 			}
+		}
+
+		if (!subtree) {
+			if (partial == chain) {
+				/* Shared branch grows from the inode */
+				ext4_free_branches(handle, inode, NULL,
+						   &nr, &nr+1,
+						   (chain+n-1) - partial);
+				*partial->p = 0;
+			} else {
+				/* Shared branch grows from an indirect block */
+				BUFFER_TRACE(partial->bh, "get_write_access");
+				ext4_free_branches(handle, inode, partial->bh,
+						   partial->p,
+						   partial->p+1,
+						   (chain+n-1) - partial);
+			}
+		}
+	}
+
+	if (!nr2) {
+		/*
+		 * ext4_find_shared returns Indirect structure which
+		 * points to the last element which should not be
+		 * removed by truncate. But this is end of the range
+		 * in punch_hole so we need to point to the next element
+		 */
+		partial2->p++;
+	}
+
+	while (partial > chain || partial2 > chain2) {
+		int depth = (chain+n-1) - partial;
+		int depth2 = (chain2+n2-1) - partial2;
+
+		if (partial > chain && partial2 > chain2 &&
+		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
+			/*
+			 * We've converged on the same block. Clear the range,
+			 * then we're done.
+			 */
+			ext4_free_branches(handle, inode, partial->bh,
+					   partial->p + 1,
+					   partial2->p,
+					   (chain+n-1) - partial);
+			BUFFER_TRACE(partial->bh, "call brelse");
+			brelse(partial->bh);
+			BUFFER_TRACE(partial2->bh, "call brelse");
+			brelse(partial2->bh);
 			return 0;
 		}
+
 		/*
-		 * Clear the ends of indirect blocks on the shared branch
-		 * at the start of the range
+		 * The start and end partial branches may not be at the same
+		 * level even though the punch happened within one level. So, we
+		 * give them a chance to arrive at the same level, then walk
+		 * them in step with each other until we converge on the same
+		 * block.
 		 */
-		if (partial > chain) {
+		if (partial > chain && depth <= depth2) {
 			ext4_free_branches(handle, inode, partial->bh,
-				   partial->p + 1,
-				   (__le32 *)partial->bh->b_data+addr_per_block,
-				   (chain+n-1) - partial);
+					   partial->p + 1,
+					   (__le32 *)partial->bh->b_data+addr_per_block,
+					   (chain+n-1) - partial);
 			BUFFER_TRACE(partial->bh, "call brelse");
 			brelse(partial->bh);
 			partial--;
 		}
-		/*
-		 * Clear the ends of indirect blocks on the shared branch
-		 * at the end of the range
-		 */
-		if (partial2 > chain2) {
+		if (partial2 > chain2 && depth2 <= depth) {
 			ext4_free_branches(handle, inode, partial2->bh,
 					   (__le32 *)partial2->bh->b_data,
 					   partial2->p,
-					   (chain2+n-1) - partial2);
+					   (chain2+n2-1) - partial2);
 			BUFFER_TRACE(partial2->bh, "call brelse");
 			brelse(partial2->bh);
 			partial2--;
 		}
 	}
+	return 0;
 
 do_indirects:
 	/* Kill the remaining (whole) subtrees */
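
The rewritten loop above keeps one cursor on the branch at the start of the punched range and one at the end, compares their depths on every iteration, and steps only the cursor that is at least as deep as the other, so both are guaranteed to meet on the same indirect block before the shared range between them is freed. A toy model of that convergence rule on two root-to-leaf paths, with plain arrays standing in for the chains of Indirect structures:

    #include <stdio.h>

    /* Two root-to-leaf paths through the same tree; values are block numbers.
     * Both paths share block 7, which is where the walk must converge. */
    static int start_path[] = { 1, 7, 12, 30 };
    static int end_path[]   = { 1, 7, 19, 44 };

    int main(void)
    {
    	int n1 = 4, n2 = 4;
    	int c1 = 3;		/* start-of-range cursor begins at its leaf */
    	int c2 = 2;		/* end-of-range cursor was returned one level higher */

    	while (c1 > 0 || c2 > 0) {
    		int depth1 = (n1 - 1) - c1;	/* how far each cursor sits above its leaf */
    		int depth2 = (n2 - 1) - c2;

    		if (c1 > 0 && c2 > 0 && start_path[c1] == end_path[c2]) {
    			printf("converged on block %d\n", start_path[c1]);
    			break;
    		}
    		/* Step only the cursor that is not already shallower than the other,
    		 * mirroring the "depth <= depth2" and "depth2 <= depth" tests above. */
    		if (c1 > 0 && depth1 <= depth2)
    			c1--;
    		if (c2 > 0 && depth2 <= depth1)
    			c2--;
    	}
    	return 0;
    }
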
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 85404f1..5cb9a21 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1024,6 +1024,7 @@
 {
 	handle_t *handle = ext4_journal_current_handle();
 	struct inode *inode = mapping->host;
+	loff_t old_size = inode->i_size;
 	int ret = 0, ret2;
 	int i_size_changed = 0;
 
@@ -1054,6 +1055,8 @@
 	unlock_page(page);
 	page_cache_release(page);
 
+	if (old_size < pos)
+		pagecache_isize_extended(inode, old_size, pos);
 	/*
 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
 	 * makes the holding time of page lock longer. Second, it forces lock
@@ -1095,6 +1098,7 @@
 {
 	handle_t *handle = ext4_journal_current_handle();
 	struct inode *inode = mapping->host;
+	loff_t old_size = inode->i_size;
 	int ret = 0, ret2;
 	int partial = 0;
 	unsigned from, to;
@@ -1127,6 +1131,9 @@
 	unlock_page(page);
 	page_cache_release(page);
 
+	if (old_size < pos)
+		pagecache_isize_extended(inode, old_size, pos);
+
 	if (size_changed) {
 		ret2 = ext4_mark_inode_dirty(handle, inode);
 		if (!ret)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1adac68..e061e66 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2779,6 +2779,12 @@
 	if (readonly)
 		return 1;
 
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_READONLY)) {
+		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
+		sb->s_flags |= MS_RDONLY;
+		return 1;
+	}
+
 	/* Check that feature set is OK for a read-write mount */
 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) {
 		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
@@ -3936,9 +3942,8 @@
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);
 
-	init_timer(&sbi->s_err_report);
-	sbi->s_err_report.function = print_daily_error_info;
-	sbi->s_err_report.data = (unsigned long) sb;
+	setup_timer(&sbi->s_err_report, print_daily_error_info,
+		(unsigned long) sb);
 
 	/* Register extent status tree shrinker */
 	if (ext4_es_register_shrinker(sbi))
@@ -4866,9 +4871,6 @@
 	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
 		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
 
-	/*
-	 * Allow the "check" option to be passed as a remount option.
-	 */
 	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
 		err = -EINVAL;
 		goto restore_opts;
@@ -4877,17 +4879,8 @@
 	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
 	    test_opt(sb, JOURNAL_CHECKSUM)) {
 		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
-			 "during remount not supported");
-		err = -EINVAL;
-		goto restore_opts;
-	}
-
-	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
-	    test_opt(sb, JOURNAL_CHECKSUM)) {
-		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
-			 "during remount not supported");
-		err = -EINVAL;
-		goto restore_opts;
+			 "during remount not supported; ignoring");
+		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
 	}
 
 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
@@ -4963,7 +4956,9 @@
 				ext4_mark_recovery_complete(sb, es);
 		} else {
 			/* Make sure we can mount this feature set readwrite */
-			if (!ext4_feature_set_ok(sb, 0)) {
+			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+					EXT4_FEATURE_RO_COMPAT_READONLY) ||
+			    !ext4_feature_set_ok(sb, 0)) {
 				err = -EROFS;
 				goto restore_opts;
 			}
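
One hunk above folds the open-coded init_timer() plus function/data assignments into a single setup_timer() call; behaviour is unchanged, the boilerplate just lives in one helper. A minimal userspace analogue of that consolidation, with invented names:

    #include <stdio.h>

    struct simple_timer {
    	void (*function)(unsigned long);
    	unsigned long data;
    };

    /* One helper fully initialises the object, so callers stop
     * open-coding the same assignments. */
    static void simple_timer_setup(struct simple_timer *t,
    			       void (*fn)(unsigned long), unsigned long data)
    {
    	t->function = fn;
    	t->data = data;
    }

    static void report(unsigned long data)
    {
    	printf("timer fired with data %lu\n", data);
    }

    int main(void)
    {
    	struct simple_timer t;

    	simple_timer_setup(&t, report, 42);	/* was: init + t.function = ... + t.data = ... */
    	t.function(t.data);
    	return 0;
    }
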
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 073657f..e907052 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -769,9 +769,9 @@
 		struct inode *inode = wb_inode(wb->b_io.prev);
 		struct super_block *sb = inode->i_sb;
 
-		if (!grab_super_passive(sb)) {
+		if (!trylock_super(sb)) {
 			/*
-			 * grab_super_passive() may fail consistently due to
+			 * trylock_super() may fail consistently due to
 			 * s_umount being grabbed by someone else. Don't use
 			 * requeue_io() to avoid busy retrying the inode/sb.
 			 */
@@ -779,7 +779,7 @@
 			continue;
 		}
 		wrote += writeback_sb_inodes(sb, wb, work);
-		drop_super(sb);
+		up_read(&sb->s_umount);
 
 		/* refer to the same tests at the end of writeback_sb_inodes */
 		if (wrote) {
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ed19a7d..39706c5 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -890,8 +890,8 @@
 
 	newpage = buf->page;
 
-	if (WARN_ON(!PageUptodate(newpage)))
-		return -EIO;
+	if (!PageUptodate(newpage))
+		SetPageUptodate(newpage);
 
 	ClearPageMappedToDisk(newpage);
 
@@ -1353,6 +1353,17 @@
 	return err;
 }
 
+static int fuse_dev_open(struct inode *inode, struct file *file)
+{
+	/*
+	 * The fuse device's file's private_data is used to hold
+	 * the fuse_conn(ection) when it is mounted, and is used to
+	 * keep track of whether the file has been mounted already.
+	 */
+	file->private_data = NULL;
+	return 0;
+}
+
 static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
 			      unsigned long nr_segs, loff_t pos)
 {
@@ -1797,6 +1808,9 @@
 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
 		       unsigned int size, struct fuse_copy_state *cs)
 {
+	/* Don't try to move pages (yet) */
+	cs->move_pages = 0;
+
 	switch (code) {
 	case FUSE_NOTIFY_POLL:
 		return fuse_notify_poll(fc, size, cs);
@@ -2217,6 +2231,7 @@
 
 const struct file_operations fuse_dev_operations = {
 	.owner		= THIS_MODULE,
+	.open		= fuse_dev_open,
 	.llseek		= no_llseek,
 	.read		= do_sync_read,
 	.aio_read	= fuse_dev_read,
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 08e7b1a..1545b71 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -971,7 +971,7 @@
 			err = -EBUSY;
 			goto badentry;
 		}
-		if (S_ISDIR(entry->d_inode->i_mode)) {
+		if (d_is_dir(entry)) {
 			shrink_dcache_parent(entry);
 			if (!simple_empty(entry)) {
 				err = -ENOTEMPTY;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 6371192..487527b 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1809,7 +1809,7 @@
 		gfs2_consist_inode(dip);
 	dip->i_entries--;
 	dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
-	if (S_ISDIR(dentry->d_inode->i_mode))
+	if (d_is_dir(dentry))
 		drop_nlink(&dip->i_inode);
 	mark_inode_dirty(&dip->i_inode);
 
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 435bea2..f0235c1 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -530,7 +530,7 @@
 
 	/* Unlink destination if it already exists */
 	if (new_dentry->d_inode) {
-		if (S_ISDIR(new_dentry->d_inode->i_mode))
+		if (d_is_dir(new_dentry))
 			res = hfsplus_rmdir(new_dir, new_dentry);
 		else
 			res = hfsplus_unlink(new_dir, new_dentry);
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 5f27551..043ac9d 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -678,10 +678,10 @@
 		return NULL;
 	}
 
-	if (S_ISDIR(dentry->d_inode->i_mode)) {
+	if (d_is_dir(dentry)) {
 		inode->i_op = &hppfs_dir_iops;
 		inode->i_fop = &hppfs_dir_fops;
-	} else if (S_ISLNK(dentry->d_inode->i_mode)) {
+	} else if (d_is_symlink(dentry)) {
 		inode->i_op = &hppfs_link_iops;
 		inode->i_fop = &hppfs_file_fops;
 	} else {
diff --git a/fs/internal.h b/fs/internal.h
index 30459da..01dce1d 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -84,7 +84,7 @@
  * super.c
  */
 extern int do_remount_sb(struct super_block *, int, void *, int);
-extern bool grab_super_passive(struct super_block *sb);
+extern bool trylock_super(struct super_block *sb);
 extern struct dentry *mount_fs(struct file_system_type *,
 			       int, const char *, void *);
 extern struct super_block *user_get_super(dev_t);
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index bcbef08..b5128c6 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -524,6 +524,9 @@
 			if (descr_csum_size > 0 &&
 			    !jbd2_descr_block_csum_verify(journal,
 							  bh->b_data)) {
+				printk(KERN_ERR "JBD2: Invalid checksum "
+				       "recovering block %lu in log\n",
+				       next_log_block);
 				err = -EIO;
 				brelse(bh);
 				goto failed;
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 9385560..f21b6fb 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -252,7 +252,7 @@
 	if (!f->inocache)
 		return -EIO;
 
-	if (S_ISDIR(old_dentry->d_inode->i_mode))
+	if (d_is_dir(old_dentry))
 		return -EPERM;
 
 	/* XXX: This is ugly */
@@ -772,7 +772,7 @@
 	 */
 	if (new_dentry->d_inode) {
 		victim_f = JFFS2_INODE_INFO(new_dentry->d_inode);
-		if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+		if (d_is_dir(new_dentry)) {
 			struct jffs2_full_dirent *fd;
 
 			mutex_lock(&victim_f->sem);
@@ -807,7 +807,7 @@
 
 	if (victim_f) {
 		/* There was a victim. Kill it off nicely */
-		if (S_ISDIR(new_dentry->d_inode->i_mode))
+		if (d_is_dir(new_dentry))
 			clear_nlink(new_dentry->d_inode);
 		else
 			drop_nlink(new_dentry->d_inode);
@@ -815,7 +815,7 @@
 		   inode which didn't exist. */
 		if (victim_f->inocache) {
 			mutex_lock(&victim_f->sem);
-			if (S_ISDIR(new_dentry->d_inode->i_mode))
+			if (d_is_dir(new_dentry))
 				victim_f->inocache->pino_nlink = 0;
 			else
 				victim_f->inocache->pino_nlink--;
@@ -825,7 +825,7 @@
 
 	/* If it was a directory we moved, and there was no victim,
 	   increase i_nlink on its new parent */
-	if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f)
+	if (d_is_dir(old_dentry) && !victim_f)
 		inc_nlink(new_dir_i);
 
 	/* Unlink the original */
@@ -839,7 +839,7 @@
 		struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
 		mutex_lock(&f->sem);
 		inc_nlink(old_dentry->d_inode);
-		if (f->inocache && !S_ISDIR(old_dentry->d_inode->i_mode))
+		if (f->inocache && !d_is_dir(old_dentry))
 			f->inocache->pino_nlink++;
 		mutex_unlock(&f->sem);
 
@@ -852,7 +852,7 @@
 		return ret;
 	}
 
-	if (S_ISDIR(old_dentry->d_inode->i_mode))
+	if (d_is_dir(old_dentry))
 		drop_nlink(old_dir_i);
 
 	new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 0918f0e..3d76f28 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -138,7 +138,7 @@
 	struct jffs2_inode_info *f;
 	uint32_t pino;
 
-	BUG_ON(!S_ISDIR(child->d_inode->i_mode));
+	BUG_ON(!d_is_dir(child));
 
 	f = JFFS2_INODE_INFO(child->d_inode);
 
diff --git a/fs/libfs.c b/fs/libfs.c
index b2ffdb0..0ab6512 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -329,7 +329,7 @@
 		struct inode *new_dir, struct dentry *new_dentry)
 {
 	struct inode *inode = old_dentry->d_inode;
-	int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);
+	int they_are_dirs = d_is_dir(old_dentry);
 
 	if (!simple_empty(new_dentry))
 		return -ENOTEMPTY;
diff --git a/fs/locks.c b/fs/locks.c
index 365c82e..528fedf 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1665,7 +1665,8 @@
 	}
 
 	if (my_fl != NULL) {
-		error = lease->fl_lmops->lm_change(my_fl, arg, &dispose);
+		lease = my_fl;
+		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
 		if (error)
 			goto out;
 		goto out_setup;
@@ -1727,7 +1728,7 @@
 			break;
 		}
 	}
-	trace_generic_delete_lease(inode, fl);
+	trace_generic_delete_lease(inode, victim);
 	if (victim)
 		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
diff --git a/fs/namei.c b/fs/namei.c
index 96ca11d..c83145a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2814,7 +2814,7 @@
 			} else if (!dentry->d_inode) {
 				goto out;
 			} else if ((open_flag & O_TRUNC) &&
-				   S_ISREG(dentry->d_inode->i_mode)) {
+				   d_is_reg(dentry)) {
 				goto out;
 			}
 			/* will fail later, go on to get the right error */
diff --git a/fs/namespace.c b/fs/namespace.c
index 72a286e..82ef140 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1907,8 +1907,8 @@
 	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
 		return -EINVAL;
 
-	if (S_ISDIR(mp->m_dentry->d_inode->i_mode) !=
-	      S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
+	if (d_is_dir(mp->m_dentry) !=
+	      d_is_dir(mnt->mnt.mnt_root))
 		return -ENOTDIR;
 
 	return attach_recursive_mnt(mnt, p, mp, NULL);
@@ -2180,8 +2180,8 @@
 	if (!mnt_has_parent(old))
 		goto out1;
 
-	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
-	      S_ISDIR(old_path.dentry->d_inode->i_mode))
+	if (d_is_dir(path->dentry) !=
+	      d_is_dir(old_path.dentry))
 		goto out1;
 	/*
 	 * Don't move a mount residing in a shared parent.
@@ -2271,7 +2271,7 @@
 		goto unlock;
 
 	err = -EINVAL;
-	if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
+	if (d_is_symlink(newmnt->mnt.mnt_root))
 		goto unlock;
 
 	newmnt->mnt.mnt_flags = mnt_flags;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index e36a9d7..197806f 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -427,6 +427,8 @@
 	if (clp == NULL)
 		goto out;
 
+	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
+		goto out;
 	tbl = &clp->cl_session->bc_slot_table;
 
 	spin_lock(&tbl->slot_tbl_lock);
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index f4ccfe6..19ca95c 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -313,7 +313,7 @@
 		goto out;
 	}
 
-	args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL);
+	args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL);
 	if (!args->devs) {
 		status = htonl(NFS4ERR_DELAY);
 		goto out;
@@ -415,7 +415,7 @@
 			     rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
 		if (unlikely(p == NULL))
 			goto out;
-		rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls *
+		rc_list->rcl_refcalls = kmalloc_array(rc_list->rcl_nrefcalls,
 						sizeof(*rc_list->rcl_refcalls),
 						GFP_KERNEL);
 		if (unlikely(rc_list->rcl_refcalls == NULL))
@@ -464,8 +464,10 @@
 
 		for (i = 0; i < args->csa_nrclists; i++) {
 			status = decode_rc_list(xdr, &args->csa_rclists[i]);
-			if (status)
+			if (status) {
+				args->csa_nrclists = i;
 				goto out_free;
+			}
 		}
 	}
 	status = 0;
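
The callback_xdr hunks above switch to kmalloc_array() for the overflow-checked allocation and, on a decode failure, record how many referring-call lists were actually filled in (args->csa_nrclists = i) before jumping to the error path, so the shared cleanup frees only what was allocated. A compact userspace sketch of that remember-how-far-you-got pattern; the decode step is faked:

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { int *payload; };

    /* Shared teardown: trusts 'nr' to say how many entries own an allocation. */
    static void free_entries(struct entry *arr, int nr)
    {
    	int i;

    	for (i = 0; i < nr; i++)
    		free(arr[i].payload);
    	free(arr);
    }

    static int decode_one(struct entry *e, int i)
    {
    	if (i == 2)			/* simulate a failure on the third entry */
    		return -1;
    	e->payload = malloc(sizeof(int));
    	return e->payload ? 0 : -1;
    }

    static int decode_all(int want)
    {
    	/* calloc() gives the same multiplication-overflow protection here
    	 * that kmalloc_array() adds in the kernel hunk. */
    	struct entry *arr = calloc(want, sizeof(*arr));
    	int i, nr = want;

    	if (!arr)
    		return -1;
    	for (i = 0; i < want; i++) {
    		if (decode_one(&arr[i], i)) {
    			nr = i;		/* record the decoded prefix before bailing out */
    			break;
    		}
    	}
    	free_entries(arr, nr);		/* never touches undecoded slots */
    	return nr == want ? 0 : -1;
    }

    int main(void)
    {
    	printf("decode_all: %d\n", decode_all(5));
    	return 0;
    }
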
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index f9f4845..1987415 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -433,7 +433,7 @@
 
 static bool nfs_client_init_is_complete(const struct nfs_client *clp)
 {
-	return clp->cl_cons_state != NFS_CS_INITING;
+	return clp->cl_cons_state <= NFS_CS_READY;
 }
 
 int nfs_wait_client_init_complete(const struct nfs_client *clp)
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index da54332..a6ad688 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -180,10 +180,9 @@
 			delegation->cred = get_rpccred(cred);
 			clear_bit(NFS_DELEGATION_NEED_RECLAIM,
 				  &delegation->flags);
-			NFS_I(inode)->delegation_state = delegation->type;
 			spin_unlock(&delegation->lock);
-			put_rpccred(oldcred);
 			rcu_read_unlock();
+			put_rpccred(oldcred);
 			trace_nfs4_reclaim_delegation(inode, res->delegation_type);
 		} else {
 			/* We appear to have raced with a delegation return. */
@@ -275,7 +274,6 @@
 	set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
 	list_del_rcu(&delegation->super_list);
 	delegation->inode = NULL;
-	nfsi->delegation_state = 0;
 	rcu_assign_pointer(nfsi->delegation, NULL);
 	spin_unlock(&delegation->lock);
 	return delegation;
@@ -355,7 +353,6 @@
 					&delegation->stateid)) {
 			nfs_update_inplace_delegation(old_delegation,
 					delegation);
-			nfsi->delegation_state = old_delegation->type;
 			goto out;
 		}
 		/*
@@ -373,13 +370,15 @@
 			delegation = NULL;
 			goto out;
 		}
-		freeme = nfs_detach_delegation_locked(nfsi, 
+		if (test_and_set_bit(NFS_DELEGATION_RETURNING,
+					&old_delegation->flags))
+			goto out;
+		freeme = nfs_detach_delegation_locked(nfsi,
 				old_delegation, clp);
 		if (freeme == NULL)
 			goto out;
 	}
 	list_add_rcu(&delegation->super_list, &server->delegations);
-	nfsi->delegation_state = delegation->type;
 	rcu_assign_pointer(nfsi->delegation, delegation);
 	delegation = NULL;
 
@@ -437,6 +436,8 @@
 {
 	bool ret = false;
 
+	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
+		goto out;
 	if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
 		ret = true;
 	if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
@@ -448,6 +449,7 @@
 			ret = true;
 		spin_unlock(&delegation->lock);
 	}
+out:
 	return ret;
 }
 
@@ -475,14 +477,20 @@
 								super_list) {
 			if (!nfs_delegation_need_return(delegation))
 				continue;
-			inode = nfs_delegation_grab_inode(delegation);
-			if (inode == NULL)
+			if (!nfs_sb_active(server->super))
 				continue;
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL) {
+				rcu_read_unlock();
+				nfs_sb_deactive(server->super);
+				goto restart;
+			}
 			delegation = nfs_start_delegation_return_locked(NFS_I(inode));
 			rcu_read_unlock();
 
 			err = nfs_end_delegation_return(inode, delegation, 0);
 			iput(inode);
+			nfs_sb_deactive(server->super);
 			if (!err)
 				goto restart;
 			set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
@@ -813,19 +821,30 @@
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 		list_for_each_entry_rcu(delegation, &server->delegations,
 								super_list) {
+			if (test_bit(NFS_DELEGATION_RETURNING,
+						&delegation->flags))
+				continue;
 			if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
 						&delegation->flags) == 0)
 				continue;
-			inode = nfs_delegation_grab_inode(delegation);
-			if (inode == NULL)
+			if (!nfs_sb_active(server->super))
 				continue;
-			delegation = nfs_detach_delegation(NFS_I(inode),
-					delegation, server);
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL) {
+				rcu_read_unlock();
+				nfs_sb_deactive(server->super);
+				goto restart;
+			}
+			delegation = nfs_start_delegation_return_locked(NFS_I(inode));
 			rcu_read_unlock();
-
-			if (delegation != NULL)
-				nfs_free_delegation(delegation);
+			if (delegation != NULL) {
+				delegation = nfs_detach_delegation(NFS_I(inode),
+					delegation, server);
+				if (delegation != NULL)
+					nfs_free_delegation(delegation);
+			}
 			iput(inode);
+			nfs_sb_deactive(server->super);
 			goto restart;
 		}
 	}
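
Both scan loops above now pin the superblock with nfs_sb_active() before leaving the RCU read section, and if the inode has already gone they drop everything and restart the scan rather than continuing over a list that may have changed underneath them. A toy single-threaded model of that pin-then-restart loop; the locking calls are stand-ins:

    #include <stdio.h>

    struct item { int id; int refcount; int needs_work; };

    static struct item items[] = {
    	{ 1, 1, 0 }, { 2, 1, 1 }, { 3, 1, 1 },
    };
    #define NITEMS 3

    static void lock(void)   { }	/* stand-ins for rcu_read_lock()/unlock() */
    static void unlock(void) { }

    static int get(struct item *it)  { it->refcount++; return 1; }
    static void put(struct item *it) { it->refcount--; }
    static void slow_work(struct item *it)
    {
    	it->needs_work = 0;
    	printf("worked on %d\n", it->id);
    }

    int main(void)
    {
    restart:
    	lock();
    	for (int i = 0; i < NITEMS; i++) {
    		struct item *it = &items[i];

    		if (!it->needs_work)
    			continue;
    		if (!get(it))		/* pin it before we drop the lock */
    			continue;
    		unlock();		/* do the slow part outside the lock... */
    		slow_work(it);
    		put(it);
    		goto restart;		/* ...then rescan, since the list may have changed */
    	}
    	unlock();
    	return 0;
    }
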
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 9b0c55c..c19e16f 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -408,14 +408,22 @@
 	return 0;
 }
 
+/* Match file and dirent using either filehandle or fileid
+ * Note: caller is responsible for checking the fsid
+ */
 static
 int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
 {
+	struct nfs_inode *nfsi;
+
 	if (dentry->d_inode == NULL)
 		goto different;
-	if (nfs_compare_fh(entry->fh, NFS_FH(dentry->d_inode)) != 0)
-		goto different;
-	return 1;
+
+	nfsi = NFS_I(dentry->d_inode);
+	if (entry->fattr->fileid == nfsi->fileid)
+		return 1;
+	if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
+		return 1;
 different:
 	return 0;
 }
@@ -469,6 +477,10 @@
 	struct inode *inode;
 	int status;
 
+	if (!(entry->fattr->valid & NFS_ATTR_FATTR_FILEID))
+		return;
+	if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID))
+		return;
 	if (filename.name[0] == '.') {
 		if (filename.len == 1)
 			return;
@@ -479,6 +491,10 @@
 
 	dentry = d_lookup(parent, &filename);
 	if (dentry != NULL) {
+		/* Is there a mountpoint here? If so, just exit */
+		if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid,
+					&entry->fattr->fsid))
+			goto out;
 		if (nfs_same_file(dentry, entry)) {
 			nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
 			status = nfs_refresh_inode(dentry->d_inode, entry->fattr);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 7077521..e907c8c 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -283,7 +283,7 @@
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
 			      struct nfs_direct_req *dreq)
 {
-	cinfo->lock = &dreq->lock;
+	cinfo->lock = &dreq->inode->i_lock;
 	cinfo->mds = &dreq->mds_cinfo;
 	cinfo->ds = &dreq->ds_cinfo;
 	cinfo->dreq = dreq;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 94712fc..e679d24 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -178,7 +178,7 @@
 		iocb->ki_filp,
 		iov_iter_count(to), (unsigned long) iocb->ki_pos);
 
-	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
+	result = nfs_revalidate_mapping_protected(inode, iocb->ki_filp->f_mapping);
 	if (!result) {
 		result = generic_file_read_iter(iocb, to);
 		if (result > 0)
@@ -199,7 +199,7 @@
 	dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
 		filp, (unsigned long) count, (unsigned long long) *ppos);
 
-	res = nfs_revalidate_mapping(inode, filp->f_mapping);
+	res = nfs_revalidate_mapping_protected(inode, filp->f_mapping);
 	if (!res) {
 		res = generic_file_splice_read(filp, ppos, pipe, count, flags);
 		if (res > 0)
@@ -372,6 +372,10 @@
 				 nfs_wait_bit_killable, TASK_KILLABLE);
 	if (ret)
 		return ret;
+	/*
+	 * Wait for O_DIRECT to complete
+	 */
+	nfs_inode_dio_wait(mapping->host);
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -619,6 +623,9 @@
 	/* make sure the cache has finished storing the page */
 	nfs_fscache_wait_on_page_write(NFS_I(inode), page);
 
+	wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
+			nfs_wait_bit_killable, TASK_KILLABLE);
+
 	lock_page(page);
 	mapping = page_file_mapping(page);
 	if (mapping != inode->i_mapping)
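
The read, splice and page_mkwrite paths above now either wait out invalidation or call nfs_revalidate_mapping_protected(), which the fs/nfs/inode.c changes further down implement by giving the old revalidation body a may_lock flag and exporting two thin wrappers around it. A small sketch of that locked/unlocked wrapper pattern, using a pthread mutex as the stand-in lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /* The shared worker takes an explicit flag instead of duplicating its body. */
    static int __revalidate(const char *who, int may_lock)
    {
    	int ret;

    	if (may_lock) {
    		pthread_mutex_lock(&big_lock);
    		ret = printf("%s: revalidating under the lock\n", who);
    		pthread_mutex_unlock(&big_lock);
    	} else {
    		ret = printf("%s: revalidating without the lock\n", who);
    	}
    	return ret > 0 ? 0 : -1;
    }

    /* Two thin wrappers keep the callers' intent obvious at the call site. */
    static int revalidate(const char *who)           { return __revalidate(who, 0); }
    static int revalidate_protected(const char *who) { return __revalidate(who, 1); }

    int main(void)
    {
    	revalidate("getattr path");
    	revalidate_protected("read path");
    	return 0;
    }
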
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 7ae1c26..91e88a7 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -960,52 +960,19 @@
 {
 	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
 	u32 i, j;
-	struct list_head *list;
-	struct pnfs_commit_bucket *buckets;
 
 	if (fl->commit_through_mds) {
-		list = &cinfo->mds->list;
-		spin_lock(cinfo->lock);
-		goto mds_commit;
-	}
-
-	/* Note that we are calling nfs4_fl_calc_j_index on each page
-	 * that ends up being committed to a data server.  An attractive
-	 * alternative is to add a field to nfs_write_data and nfs_page
-	 * to store the value calculated in filelayout_write_pagelist
-	 * and just use that here.
-	 */
-	j = nfs4_fl_calc_j_index(lseg, req_offset(req));
-	i = select_bucket_index(fl, j);
-	spin_lock(cinfo->lock);
-	buckets = cinfo->ds->buckets;
-	list = &buckets[i].written;
-	if (list_empty(list)) {
-		/* Non-empty buckets hold a reference on the lseg.  That ref
-		 * is normally transferred to the COMMIT call and released
-		 * there.  It could also be released if the last req is pulled
-		 * off due to a rewrite, in which case it will be done in
-		 * pnfs_generic_clear_request_commit
+		nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
+	} else {
+		/* Note that we are calling nfs4_fl_calc_j_index on each page
+		 * that ends up being committed to a data server.  An attractive
+		 * alternative is to add a field to nfs_write_data and nfs_page
+		 * to store the value calculated in filelayout_write_pagelist
+		 * and just use that here.
 		 */
-		buckets[i].wlseg = pnfs_get_lseg(lseg);
-	}
-	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
-	cinfo->ds->nwritten++;
-
-mds_commit:
-	/* nfs_request_add_commit_list(). We need to add req to list without
-	 * dropping cinfo lock.
-	 */
-	set_bit(PG_CLEAN, &(req)->wb_flags);
-	nfs_list_add_request(req, list);
-	cinfo->mds->ncommit++;
-	spin_unlock(cinfo->lock);
-	if (!cinfo->dreq) {
-		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-			     BDI_RECLAIMABLE);
-		__mark_inode_dirty(req->wb_context->dentry->d_inode,
-				   I_DIRTY_DATASYNC);
+		j = nfs4_fl_calc_j_index(lseg, req_offset(req));
+		i = select_bucket_index(fl, j);
+		pnfs_layout_mark_request_commit(req, lseg, cinfo, i);
 	}
 }
 
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index c22ecaa..315cc68 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1332,47 +1332,6 @@
 	return PNFS_ATTEMPTED;
 }
 
-static void
-ff_layout_mark_request_commit(struct nfs_page *req,
-			      struct pnfs_layout_segment *lseg,
-			      struct nfs_commit_info *cinfo,
-			      u32 ds_commit_idx)
-{
-	struct list_head *list;
-	struct pnfs_commit_bucket *buckets;
-
-	spin_lock(cinfo->lock);
-	buckets = cinfo->ds->buckets;
-	list = &buckets[ds_commit_idx].written;
-	if (list_empty(list)) {
-		/* Non-empty buckets hold a reference on the lseg.  That ref
-		 * is normally transferred to the COMMIT call and released
-		 * there.  It could also be released if the last req is pulled
-		 * off due to a rewrite, in which case it will be done in
-		 * pnfs_common_clear_request_commit
-		 */
-		WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
-		buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
-	}
-	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
-	cinfo->ds->nwritten++;
-
-	/* nfs_request_add_commit_list(). We need to add req to list without
-	 * dropping cinfo lock.
-	 */
-	set_bit(PG_CLEAN, &(req)->wb_flags);
-	nfs_list_add_request(req, list);
-	cinfo->mds->ncommit++;
-	spin_unlock(cinfo->lock);
-	if (!cinfo->dreq) {
-		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-			     BDI_RECLAIMABLE);
-		__mark_inode_dirty(req->wb_context->dentry->d_inode,
-				   I_DIRTY_DATASYNC);
-	}
-}
-
 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
 {
 	return i;
@@ -1540,7 +1499,7 @@
 	.pg_write_ops		= &ff_layout_pg_write_ops,
 	.get_ds_info		= ff_layout_get_ds_info,
 	.free_deviceid_node	= ff_layout_free_deveiceid_node,
-	.mark_request_commit	= ff_layout_mark_request_commit,
+	.mark_request_commit	= pnfs_layout_mark_request_commit,
 	.clear_request_commit	= pnfs_generic_clear_request_commit,
 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e4f0dce..d42dff6 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -556,6 +556,7 @@
  * This is a copy of the common vmtruncate, but with the locking
  * corrected to take into account the fact that NFS requires
  * inode->i_size to be updated under the inode->i_lock.
+ * Note: must be called with inode->i_lock held!
  */
 static int nfs_vmtruncate(struct inode * inode, loff_t offset)
 {
@@ -565,14 +566,14 @@
 	if (err)
 		goto out;
 
-	spin_lock(&inode->i_lock);
 	i_size_write(inode, offset);
 	/* Optimisation */
 	if (offset == 0)
 		NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
-	spin_unlock(&inode->i_lock);
 
+	spin_unlock(&inode->i_lock);
 	truncate_pagecache(inode, offset);
+	spin_lock(&inode->i_lock);
 out:
 	return err;
 }
@@ -585,10 +586,15 @@
  * Note: we do this in the *proc.c in order to ensure that
  *       it works for things like exclusive creates too.
  */
-void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
+void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
+		struct nfs_fattr *fattr)
 {
+	/* Barrier: bump the attribute generation count. */
+	nfs_fattr_set_barrier(fattr);
+
+	spin_lock(&inode->i_lock);
+	NFS_I(inode)->attr_gencount = fattr->gencount;
 	if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
-		spin_lock(&inode->i_lock);
 		if ((attr->ia_valid & ATTR_MODE) != 0) {
 			int mode = attr->ia_mode & S_IALLUGO;
 			mode |= inode->i_mode & ~S_IALLUGO;
@@ -600,12 +606,13 @@
 			inode->i_gid = attr->ia_gid;
 		nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS
 				| NFS_INO_INVALID_ACL);
-		spin_unlock(&inode->i_lock);
 	}
 	if ((attr->ia_valid & ATTR_SIZE) != 0) {
 		nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
 		nfs_vmtruncate(inode, attr->ia_size);
 	}
+	nfs_update_inode(inode, fattr);
+	spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
 
@@ -1028,6 +1035,7 @@
 
 	if (mapping->nrpages != 0) {
 		if (S_ISREG(inode->i_mode)) {
+			unmap_mapping_range(mapping, 0, 0, 0);
 			ret = nfs_sync_mapping(mapping);
 			if (ret < 0)
 				return ret;
@@ -1060,11 +1068,14 @@
 }
 
 /**
- * nfs_revalidate_mapping - Revalidate the pagecache
+ * __nfs_revalidate_mapping - Revalidate the pagecache
  * @inode - pointer to host inode
  * @mapping - pointer to mapping
+ * @may_lock - take inode->i_mutex?
  */
-int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
+static int __nfs_revalidate_mapping(struct inode *inode,
+		struct address_space *mapping,
+		bool may_lock)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	unsigned long *bitlock = &nfsi->flags;
@@ -1113,7 +1124,12 @@
 	nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
 	spin_unlock(&inode->i_lock);
 	trace_nfs_invalidate_mapping_enter(inode);
-	ret = nfs_invalidate_mapping(inode, mapping);
+	if (may_lock) {
+		mutex_lock(&inode->i_mutex);
+		ret = nfs_invalidate_mapping(inode, mapping);
+		mutex_unlock(&inode->i_mutex);
+	} else
+		ret = nfs_invalidate_mapping(inode, mapping);
 	trace_nfs_invalidate_mapping_exit(inode, ret);
 
 	clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
@@ -1123,6 +1139,29 @@
 	return ret;
 }
 
+/**
+ * nfs_revalidate_mapping - Revalidate the pagecache
+ * @inode - pointer to host inode
+ * @mapping - pointer to mapping
+ */
+int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
+{
+	return __nfs_revalidate_mapping(inode, mapping, false);
+}
+
+/**
+ * nfs_revalidate_mapping_protected - Revalidate the pagecache
+ * @inode - pointer to host inode
+ * @mapping - pointer to mapping
+ *
+ * Differs from nfs_revalidate_mapping() in that it grabs the inode->i_mutex
+ * while invalidating the mapping.
+ */
+int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping)
+{
+	return __nfs_revalidate_mapping(inode, mapping, true);
+}
+
 static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
@@ -1231,13 +1270,6 @@
 	return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
 }
 
-static int nfs_size_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
-{
-	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
-		return 0;
-	return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
-}
-
 static atomic_long_t nfs_attr_generation_counter;
 
 static unsigned long nfs_read_attr_generation_counter(void)
@@ -1249,6 +1281,7 @@
 {
 	return atomic_long_inc_return(&nfs_attr_generation_counter);
 }
+EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
 
 void nfs_fattr_init(struct nfs_fattr *fattr)
 {
@@ -1260,6 +1293,22 @@
 }
 EXPORT_SYMBOL_GPL(nfs_fattr_init);
 
+/**
+ * nfs_fattr_set_barrier
+ * @fattr: attributes
+ *
+ * Used to set a barrier after an attribute was updated. This
+ * barrier ensures that older attributes from RPC calls that may
+ * have raced with our update cannot clobber these new values.
+ * Note that you are still responsible for ensuring that other
+ * operations which change the attribute on the server do not
+ * collide.
+ */
+void nfs_fattr_set_barrier(struct nfs_fattr *fattr)
+{
+	fattr->gencount = nfs_inc_attr_generation_counter();
+}
+
 struct nfs_fattr *nfs_alloc_fattr(void)
 {
 	struct nfs_fattr *fattr;
@@ -1370,7 +1419,6 @@
 
 	return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
 		nfs_ctime_need_update(inode, fattr) ||
-		nfs_size_need_update(inode, fattr) ||
 		((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
 }
 
@@ -1460,6 +1508,7 @@
 	int status;
 
 	spin_lock(&inode->i_lock);
+	nfs_fattr_set_barrier(fattr);
 	status = nfs_post_op_update_inode_locked(inode, fattr);
 	spin_unlock(&inode->i_lock);
 
@@ -1468,7 +1517,7 @@
 EXPORT_SYMBOL_GPL(nfs_post_op_update_inode);
 
 /**
- * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
+ * nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache
  * @inode - pointer to inode
  * @fattr - updated attributes
  *
@@ -1478,11 +1527,10 @@
  *
  * This function is mainly designed to be used by the ->write_done() functions.
  */
-int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr)
+int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr)
 {
 	int status;
 
-	spin_lock(&inode->i_lock);
 	/* Don't do a WCC update if these attributes are already stale */
 	if ((fattr->valid & NFS_ATTR_FATTR) == 0 ||
 			!nfs_inode_attrs_need_update(inode, fattr)) {
@@ -1514,6 +1562,27 @@
 	}
 out_noforce:
 	status = nfs_post_op_update_inode_locked(inode, fattr);
+	return status;
+}
+
+/**
+ * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
+ * @inode - pointer to inode
+ * @fattr - updated attributes
+ *
+ * After an operation that has changed the inode metadata, mark the
+ * attribute cache as being invalid, then try to update it. Fake up
+ * weak cache consistency data, if none exist.
+ *
+ * This function is mainly designed to be used by the ->write_done() functions.
+ */
+int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr)
+{
+	int status;
+
+	spin_lock(&inode->i_lock);
+	nfs_fattr_set_barrier(fattr);
+	status = nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
 	spin_unlock(&inode->i_lock);
 	return status;
 }
@@ -1715,6 +1784,7 @@
 		nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
 		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
 		nfsi->attrtimeo_timestamp = now;
+		/* Set barrier to be more recent than all outstanding updates */
 		nfsi->attr_gencount = nfs_inc_attr_generation_counter();
 	} else {
 		if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
@@ -1722,6 +1792,9 @@
 				nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
 			nfsi->attrtimeo_timestamp = now;
 		}
+		/* Set the barrier to be more recent than this fattr */
+		if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
+			nfsi->attr_gencount = fattr->gencount;
 	}
 	invalid &= ~NFS_INO_INVALID_ATTR;
 	/* Don't invalidate the data if we were to blame */
@@ -1775,7 +1848,6 @@
 #if IS_ENABLED(CONFIG_NFS_V4)
 	INIT_LIST_HEAD(&nfsi->open_states);
 	nfsi->delegation = NULL;
-	nfsi->delegation_state = 0;
 	init_rwsem(&nfsi->rwsem);
 	nfsi->layout = NULL;
 #endif
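
Several hunks above add nfs_fattr_set_barrier() calls: each reply's attributes are stamped from a global, monotonically increasing generation counter, and the update path only accepts attributes whose generation is newer than the one recorded on the inode, so a slow RPC reply cannot clobber fresher data that raced past it. A stripped-down single-threaded model of that ordering check; the struct fields are placeholders:

    #include <stdio.h>

    static long generation_counter;

    struct cached_attrs { long gencount; long size; };
    struct reply_attrs  { long gencount; long size; };

    /* Stamp a reply with a barrier value before applying the update it describes. */
    static void set_barrier(struct reply_attrs *r)
    {
    	r->gencount = ++generation_counter;
    }

    /* Accept only attributes newer than what the cache already holds. */
    static void update_cache(struct cached_attrs *c, const struct reply_attrs *r)
    {
    	if (r->gencount - c->gencount > 0) {
    		c->size = r->size;
    		c->gencount = r->gencount;
    	}
    }

    int main(void)
    {
    	struct cached_attrs cache = { 0, 0 };
    	struct reply_attrs old_reply, new_reply;

    	set_barrier(&old_reply); old_reply.size = 100;	/* issued first... */
    	set_barrier(&new_reply); new_reply.size = 200;	/* ...but this one is newer */

    	update_cache(&cache, &new_reply);
    	update_cache(&cache, &old_reply);	/* arrives late, must be ignored */
    	printf("cached size = %ld (stale 100 rejected)\n", cache.size);
    	return 0;
    }
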
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 212b8c8..9e6475b 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -459,6 +459,7 @@
 			     struct nfs_commit_info *cinfo,
 			     u32 ds_commit_idx);
 int nfs_write_need_commit(struct nfs_pgio_header *);
+void nfs_writeback_update_inode(struct nfs_pgio_header *hdr);
 int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
 			    int how, struct nfs_commit_info *cinfo);
 void nfs_retry_commit(struct list_head *page_list,
@@ -598,6 +599,19 @@
 }
 
 /*
+ * Record the page as unstable and mark its inode as dirty.
+ */
+static inline
+void nfs_mark_page_unstable(struct page *page)
+{
+	struct inode *inode = page_file_mapping(page)->host;
+
+	inc_zone_page_state(page, NR_UNSTABLE_NFS);
+	inc_bdi_stat(inode_to_bdi(inode), BDI_RECLAIMABLE);
+	 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+}
+
+/*
  * Determine the number of bytes of data the page contains
  */
 static inline
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 78e557c..1f11d25 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -138,7 +138,7 @@
 	nfs_fattr_init(fattr);
 	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
 	if (status == 0)
-		nfs_setattr_update_inode(inode, sattr);
+		nfs_setattr_update_inode(inode, sattr, fattr);
 	dprintk("NFS reply setattr: %d\n", status);
 	return status;
 }
@@ -834,7 +834,7 @@
 	if (nfs3_async_handle_jukebox(task, inode))
 		return -EAGAIN;
 	if (task->tk_status >= 0)
-		nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr);
+		nfs_writeback_update_inode(hdr);
 	return 0;
 }
 
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 2a932fd..53852a4 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1987,6 +1987,11 @@
 		if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
 			entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
 
+		if (entry->fattr->fileid != entry->ino) {
+			entry->fattr->mounted_on_fileid = entry->ino;
+			entry->fattr->valid |= NFS_ATTR_FATTR_MOUNTED_ON_FILEID;
+		}
+
 		/* In fact, a post_op_fh3: */
 		p = xdr_inline_decode(xdr, 4);
 		if (unlikely(p == NULL))
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 8646af9..86d6214 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -621,6 +621,9 @@
 	spin_lock(&nn->nfs_client_lock);
 	list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
 
+		if (pos == new)
+			goto found;
+
 		if (pos->rpc_ops != new->rpc_ops)
 			continue;
 
@@ -639,10 +642,6 @@
 			prev = pos;
 
 			status = nfs_wait_client_init_complete(pos);
-			if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
-				nfs4_schedule_lease_recovery(pos);
-				status = nfs4_wait_clnt_recover(pos);
-			}
 			spin_lock(&nn->nfs_client_lock);
 			if (status < 0)
 				break;
@@ -668,7 +667,7 @@
 		 */
 		if (!nfs4_match_client_owner_id(pos, new))
 			continue;
-
+found:
 		atomic_inc(&pos->cl_count);
 		*result = pos;
 		status = 0;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 2e7c9f7..627f37c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -901,6 +901,7 @@
 	if (!cinfo->atomic || cinfo->before != dir->i_version)
 		nfs_force_lookup_revalidate(dir);
 	dir->i_version = cinfo->after;
+	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
 	nfs_fscache_invalidate(dir);
 	spin_unlock(&dir->i_lock);
 }
@@ -1552,6 +1553,9 @@
 
 	opendata->o_arg.open_flags = 0;
 	opendata->o_arg.fmode = fmode;
+	opendata->o_arg.share_access = nfs4_map_atomic_open_share(
+			NFS_SB(opendata->dentry->d_sb),
+			fmode, 0);
 	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
 	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
 	nfs4_init_opendata_res(opendata);
@@ -2413,8 +2417,8 @@
 				opendata->o_res.f_attr, sattr,
 				state, label, olabel);
 		if (status == 0) {
-			nfs_setattr_update_inode(state->inode, sattr);
-			nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
+			nfs_setattr_update_inode(state->inode, sattr,
+					opendata->o_res.f_attr);
 			nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
 		}
 	}
@@ -2651,7 +2655,7 @@
 		case -NFS4ERR_BAD_STATEID:
 		case -NFS4ERR_EXPIRED:
 			if (!nfs4_stateid_match(&calldata->arg.stateid,
-						&state->stateid)) {
+						&state->open_stateid)) {
 				rpc_restart_call_prepare(task);
 				goto out_release;
 			}
@@ -2687,7 +2691,7 @@
 	is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
 	is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
 	is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
-	nfs4_stateid_copy(&calldata->arg.stateid, &state->stateid);
+	nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
 	/* Calculate the change in open mode */
 	calldata->arg.fmode = 0;
 	if (state->n_rdwr == 0) {
@@ -3288,7 +3292,7 @@
 
 	status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
 	if (status == 0) {
-		nfs_setattr_update_inode(inode, sattr);
+		nfs_setattr_update_inode(inode, sattr, fattr);
 		nfs_setsecurity(inode, fattr, label);
 	}
 	nfs4_label_free(label);
@@ -4234,7 +4238,7 @@
 	}
 	if (task->tk_status >= 0) {
 		renew_lease(NFS_SERVER(inode), hdr->timestamp);
-		nfs_post_op_update_inode_force_wcc(inode, &hdr->fattr);
+		nfs_writeback_update_inode(hdr);
 	}
 	return 0;
 }
@@ -6648,47 +6652,47 @@
 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
 {
 	int status;
+	struct nfs41_bind_conn_to_session_args args = {
+		.client = clp,
+		.dir = NFS4_CDFC4_FORE_OR_BOTH,
+	};
 	struct nfs41_bind_conn_to_session_res res;
 	struct rpc_message msg = {
 		.rpc_proc =
 			&nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
-		.rpc_argp = clp,
+		.rpc_argp = &args,
 		.rpc_resp = &res,
 		.rpc_cred = cred,
 	};
 
 	dprintk("--> %s\n", __func__);
 
-	res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
-	if (unlikely(res.session == NULL)) {
-		status = -ENOMEM;
-		goto out;
-	}
+	nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
+	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
+		args.dir = NFS4_CDFC4_FORE;
 
 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
 	trace_nfs4_bind_conn_to_session(clp, status);
 	if (status == 0) {
-		if (memcmp(res.session->sess_id.data,
+		if (memcmp(res.sessionid.data,
 		    clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
 			dprintk("NFS: %s: Session ID mismatch\n", __func__);
 			status = -EIO;
-			goto out_session;
+			goto out;
 		}
-		if (res.dir != NFS4_CDFS4_BOTH) {
+		if ((res.dir & args.dir) != res.dir || res.dir == 0) {
 			dprintk("NFS: %s: Unexpected direction from server\n",
 				__func__);
 			status = -EIO;
-			goto out_session;
+			goto out;
 		}
-		if (res.use_conn_in_rdma_mode) {
+		if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
 			dprintk("NFS: %s: Server returned RDMA mode = true\n",
 				__func__);
 			status = -EIO;
-			goto out_session;
+			goto out;
 		}
 	}
-out_session:
-	kfree(res.session);
 out:
 	dprintk("<-- %s status= %d\n", __func__, status);
 	return status;
@@ -6893,9 +6897,13 @@
 
 	if (status == 0) {
 		clp->cl_clientid = res.clientid;
-		clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
-		if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
+		clp->cl_exchange_flags = res.flags;
+		/* Client ID is not confirmed */
+		if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
+			clear_bit(NFS4_SESSION_ESTABLISHED,
+					&clp->cl_session->session_state);
 			clp->cl_seqid = res.seqid;
+		}
 
 		kfree(clp->cl_serverowner);
 		clp->cl_serverowner = res.server_owner;
@@ -7166,10 +7174,11 @@
 		args->bc_attrs.max_reqs);
 }
 
-static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
+		struct nfs41_create_session_res *res)
 {
 	struct nfs4_channel_attrs *sent = &args->fc_attrs;
-	struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
+	struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
 
 	if (rcvd->max_resp_sz > sent->max_resp_sz)
 		return -EINVAL;
@@ -7188,11 +7197,14 @@
 	return 0;
 }
 
-static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
+		struct nfs41_create_session_res *res)
 {
 	struct nfs4_channel_attrs *sent = &args->bc_attrs;
-	struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
+	struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
 
+	if (!(res->flags & SESSION4_BACK_CHAN))
+		goto out;
 	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
 		return -EINVAL;
 	if (rcvd->max_resp_sz < sent->max_resp_sz)
@@ -7204,18 +7216,33 @@
 		return -EINVAL;
 	if (rcvd->max_reqs != sent->max_reqs)
 		return -EINVAL;
+out:
 	return 0;
 }
 
 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
-				     struct nfs4_session *session)
+				     struct nfs41_create_session_res *res)
 {
 	int ret;
 
-	ret = nfs4_verify_fore_channel_attrs(args, session);
+	ret = nfs4_verify_fore_channel_attrs(args, res);
 	if (ret)
 		return ret;
-	return nfs4_verify_back_channel_attrs(args, session);
+	return nfs4_verify_back_channel_attrs(args, res);
+}
+
+static void nfs4_update_session(struct nfs4_session *session,
+		struct nfs41_create_session_res *res)
+{
+	nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
+	/* Mark client id and session as being confirmed */
+	session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
+	set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
+	session->flags = res->flags;
+	memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
+	if (res->flags & SESSION4_BACK_CHAN)
+		memcpy(&session->bc_attrs, &res->bc_attrs,
+				sizeof(session->bc_attrs));
 }
 
 static int _nfs4_proc_create_session(struct nfs_client *clp,
@@ -7224,11 +7251,12 @@
 	struct nfs4_session *session = clp->cl_session;
 	struct nfs41_create_session_args args = {
 		.client = clp,
+		.clientid = clp->cl_clientid,
+		.seqid = clp->cl_seqid,
 		.cb_program = NFS4_CALLBACK,
 	};
-	struct nfs41_create_session_res res = {
-		.client = clp,
-	};
+	struct nfs41_create_session_res res;
+
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
 		.rpc_argp = &args,
@@ -7245,11 +7273,15 @@
 
 	if (!status) {
 		/* Verify the session's negotiated channel_attrs values */
-		status = nfs4_verify_channel_attrs(&args, session);
+		status = nfs4_verify_channel_attrs(&args, &res);
 		/* Increment the clientid slot sequence id */
-		clp->cl_seqid++;
+		if (clp->cl_seqid == res.seqid)
+			clp->cl_seqid++;
+		if (status)
+			goto out;
+		nfs4_update_session(session, &res);
 	}
-
+out:
 	return status;
 }
 
@@ -7301,8 +7333,8 @@
 	dprintk("--> nfs4_proc_destroy_session\n");
 
 	/* session is still being setup */
-	if (session->clp->cl_cons_state != NFS_CS_READY)
-		return status;
+	if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
+		return 0;
 
 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
 	trace_nfs4_destroy_session(session->clp, status);
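One detail of the nfs4proc.c changes worth isolating is the relaxed BIND_CONN_TO_SESSION validation: instead of insisting on both directions, the reply is accepted whenever the server bound a non-empty subset of the directions that were requested. A small standalone sketch of that predicate, with made-up constant values (the real ones live in the NFS headers):

#include <stdio.h>

/* Illustrative values only, not the kernel's NFS4_CDFC4_* constants. */
#define CDFC4_FORE          0x1
#define CDFC4_BACK          0x2
#define CDFC4_FORE_OR_BOTH  0x3

/* Accept the reply only if the server bound a non-empty subset of the
 * directions we asked for -- the check added to the bind_conn path above. */
static int check_bound_direction(unsigned int requested, unsigned int replied)
{
	if ((replied & requested) != replied || replied == 0)
		return -5;	/* -EIO in the kernel */
	return 0;
}

int main(void)
{
	printf("%d\n", check_bound_direction(CDFC4_FORE_OR_BOTH, CDFC4_FORE)); /* 0  */
	printf("%d\n", check_bound_direction(CDFC4_FORE, CDFC4_BACK));         /* -5 */
	printf("%d\n", check_bound_direction(CDFC4_FORE, 0));                  /* -5 */
	return 0;
}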
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index e799dc3..e23366e 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -450,7 +450,7 @@
 	tbl = &ses->fc_slot_table;
 	tbl->session = ses;
 	status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
-	if (status) /* -ENOMEM */
+	if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */
 		return status;
 	/* Back channel */
 	tbl = &ses->bc_slot_table;
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index b34ada9..e3ea2c5 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -70,6 +70,7 @@
 
 enum nfs4_session_state {
 	NFS4_SESSION_INITING,
+	NFS4_SESSION_ESTABLISHED,
 };
 
 extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
@@ -118,6 +119,12 @@
 	return 0;
 }
 
+static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst,
+		const struct nfs4_sessionid *src)
+{
+	memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN);
+}
+
 #ifdef CONFIG_CRC32
 /*
  * nfs_session_id_hash - calculate the crc32 hash for the session id
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5ad908e..f95e3b5 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -346,9 +346,23 @@
 	status = nfs4_proc_exchange_id(clp, cred);
 	if (status != NFS4_OK)
 		return status;
-	set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
 
-	return nfs41_walk_client_list(clp, result, cred);
+	status = nfs41_walk_client_list(clp, result, cred);
+	if (status < 0)
+		return status;
+	if (clp != *result)
+		return 0;
+
+	/* Purge state if the client id was established in a prior instance */
+	if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R)
+		set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+	else
+		set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+	nfs4_schedule_state_manager(clp);
+	status = nfs_wait_client_init_complete(clp);
+	if (status < 0)
+		nfs_put_client(clp);
+	return status;
 }
 
 #endif /* CONFIG_NFS_V4_1 */
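The nfs4state.c hunk above changes what happens after trunking discovery: if the server reports the client ID as already confirmed by a previous instance, stale state is purged, otherwise the new lease is confirmed. A tiny sketch of just that decision, with illustrative flag values rather than the kernel's bit definitions:

#include <stdio.h>

#define EXCHGID4_FLAG_CONFIRMED_R  0x1   /* illustrative bit values */
#define CLNT_PURGE_STATE           0x2
#define CLNT_LEASE_CONFIRM         0x4

/* A client ID the server says was confirmed by a previous instance needs
 * its old state purged; a brand new one only needs the lease confirmed. */
static unsigned long pick_recovery_action(unsigned int exchange_flags)
{
	if (exchange_flags & EXCHGID4_FLAG_CONFIRMED_R)
		return CLNT_PURGE_STATE;
	return CLNT_LEASE_CONFIRM;
}

int main(void)
{
	printf("%#lx\n", pick_recovery_action(EXCHGID4_FLAG_CONFIRMED_R)); /* purge   */
	printf("%#lx\n", pick_recovery_action(0));                         /* confirm */
	return 0;
}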
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e23a0a6..5c399ec 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1715,17 +1715,17 @@
 #if defined(CONFIG_NFS_V4_1)
 /* NFSv4.1 operations */
 static void encode_bind_conn_to_session(struct xdr_stream *xdr,
-				   struct nfs4_session *session,
+				   struct nfs41_bind_conn_to_session_args *args,
 				   struct compound_hdr *hdr)
 {
 	__be32 *p;
 
 	encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION,
 		decode_bind_conn_to_session_maxsz, hdr);
-	encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+	encode_opaque_fixed(xdr, args->sessionid.data, NFS4_MAX_SESSIONID_LEN);
 	p = xdr_reserve_space(xdr, 8);
-	*p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH);
-	*p = 0;	/* use_conn_in_rdma_mode = False */
+	*p++ = cpu_to_be32(args->dir);
+	*p = (args->use_conn_in_rdma_mode) ? cpu_to_be32(1) : cpu_to_be32(0);
 }
 
 static void encode_op_map(struct xdr_stream *xdr, struct nfs4_op_map *op_map)
@@ -1806,8 +1806,8 @@
 
 	encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
 	p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
-	p = xdr_encode_hyper(p, clp->cl_clientid);
-	*p++ = cpu_to_be32(clp->cl_seqid);			/*Sequence id */
+	p = xdr_encode_hyper(p, args->clientid);
+	*p++ = cpu_to_be32(args->seqid);			/*Sequence id */
 	*p++ = cpu_to_be32(args->flags);			/*flags */
 
 	/* Fore Channel */
@@ -2734,14 +2734,14 @@
  */
 static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
 				struct xdr_stream *xdr,
-				struct nfs_client *clp)
+				struct nfs41_bind_conn_to_session_args *args)
 {
 	struct compound_hdr hdr = {
-		.minorversion = clp->cl_mvops->minor_version,
+		.minorversion = args->client->cl_mvops->minor_version,
 	};
 
 	encode_compound_hdr(xdr, req, &hdr);
-	encode_bind_conn_to_session(xdr, clp->cl_session, &hdr);
+	encode_bind_conn_to_session(xdr, args, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -5613,7 +5613,7 @@
 
 	status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION);
 	if (!status)
-		status = decode_sessionid(xdr, &res->session->sess_id);
+		status = decode_sessionid(xdr, &res->sessionid);
 	if (unlikely(status))
 		return status;
 
@@ -5641,12 +5641,10 @@
 {
 	__be32 *p;
 	int status;
-	struct nfs_client *clp = res->client;
-	struct nfs4_session *session = clp->cl_session;
 
 	status = decode_op_hdr(xdr, OP_CREATE_SESSION);
 	if (!status)
-		status = decode_sessionid(xdr, &session->sess_id);
+		status = decode_sessionid(xdr, &res->sessionid);
 	if (unlikely(status))
 		return status;
 
@@ -5654,13 +5652,13 @@
 	p = xdr_inline_decode(xdr, 8);
 	if (unlikely(!p))
 		goto out_overflow;
-	clp->cl_seqid = be32_to_cpup(p++);
-	session->flags = be32_to_cpup(p);
+	res->seqid = be32_to_cpup(p++);
+	res->flags = be32_to_cpup(p);
 
 	/* Channel attributes */
-	status = decode_chan_attrs(xdr, &session->fc_attrs);
+	status = decode_chan_attrs(xdr, &res->fc_attrs);
 	if (!status)
-		status = decode_chan_attrs(xdr, &session->bc_attrs);
+		status = decode_chan_attrs(xdr, &res->bc_attrs);
 	return status;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 797cd62..635f086 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -344,6 +344,10 @@
 struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
 						 struct xdr_stream *xdr,
 						 gfp_t gfp_flags);
+void pnfs_layout_mark_request_commit(struct nfs_page *req,
+				     struct pnfs_layout_segment *lseg,
+				     struct nfs_commit_info *cinfo,
+				     u32 ds_commit_idx);
 
 static inline bool nfs_have_layout(struct inode *inode)
 {
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index fdc4f65..54e36b3 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -838,3 +838,33 @@
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
+
+void
+pnfs_layout_mark_request_commit(struct nfs_page *req,
+				struct pnfs_layout_segment *lseg,
+				struct nfs_commit_info *cinfo,
+				u32 ds_commit_idx)
+{
+	struct list_head *list;
+	struct pnfs_commit_bucket *buckets;
+
+	spin_lock(cinfo->lock);
+	buckets = cinfo->ds->buckets;
+	list = &buckets[ds_commit_idx].written;
+	if (list_empty(list)) {
+		/* Non-empty buckets hold a reference on the lseg.  That ref
+		 * is normally transferred to the COMMIT call and released
+		 * there.  It could also be released if the last req is pulled
+		 * off due to a rewrite, in which case it will be done in
+		 * pnfs_common_clear_request_commit
+		 */
+		WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
+		buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
+	}
+	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+	cinfo->ds->nwritten++;
+	spin_unlock(cinfo->lock);
+
+	nfs_request_add_commit_list(req, list, cinfo);
+}
+EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
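The new pnfs_layout_mark_request_commit() above pins the layout segment only when a commit bucket goes from empty to non-empty; later requests simply join the list. A simplified, lock-free userspace sketch of that refcounting rule (struct names are stand-ins, the commit list is reduced to a counter):

#include <assert.h>
#include <stdio.h>

struct lseg {                      /* stand-in for struct pnfs_layout_segment */
	int refcount;
};

struct commit_bucket {             /* stand-in for struct pnfs_commit_bucket */
	int nwritten;              /* entries queued on this sketch's "list" */
	struct lseg *wlseg;        /* reference held while the bucket is non-empty */
};

static struct lseg *get_lseg(struct lseg *l)
{
	l->refcount++;
	return l;
}

/* Core rule: the first request queued on a bucket pins the layout segment;
 * the reference is later handed to the COMMIT call (not modelled here). */
static void mark_request_commit(struct commit_bucket *b, struct lseg *l)
{
	if (b->nwritten == 0) {
		assert(b->wlseg == NULL);
		b->wlseg = get_lseg(l);
	}
	b->nwritten++;
}

int main(void)
{
	struct lseg l = { .refcount = 1 };
	struct commit_bucket b = { 0 };

	mark_request_commit(&b, &l);
	mark_request_commit(&b, &l);
	printf("nwritten=%d lseg refcount=%d\n", b.nwritten, l.refcount); /* 2, 2 */
	return 0;
}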
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index b09cc23..c63189a 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -139,7 +139,7 @@
 	nfs_fattr_init(fattr);
 	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
 	if (status == 0)
-		nfs_setattr_update_inode(inode, sattr);
+		nfs_setattr_update_inode(inode, sattr, fattr);
 	dprintk("NFS reply setattr: %d\n", status);
 	return status;
 }
@@ -609,10 +609,8 @@
 
 static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
 {
-	struct inode *inode = hdr->inode;
-
 	if (task->tk_status >= 0)
-		nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr);
+		nfs_writeback_update_inode(hdr);
 	return 0;
 }
 
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 88a6d21..849ed78 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -789,13 +789,8 @@
 	nfs_list_add_request(req, dst);
 	cinfo->mds->ncommit++;
 	spin_unlock(cinfo->lock);
-	if (!cinfo->dreq) {
-		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-			     BDI_RECLAIMABLE);
-		__mark_inode_dirty(req->wb_context->dentry->d_inode,
-				   I_DIRTY_DATASYNC);
-	}
+	if (!cinfo->dreq)
+		nfs_mark_page_unstable(req->wb_page);
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
@@ -1382,6 +1377,36 @@
 	return 0;
 }
 
+static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
+		struct nfs_fattr *fattr)
+{
+	struct nfs_pgio_args *argp = &hdr->args;
+	struct nfs_pgio_res *resp = &hdr->res;
+
+	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
+		return;
+	if (argp->offset + resp->count != fattr->size)
+		return;
+	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
+		return;
+	/* Set attribute barrier */
+	nfs_fattr_set_barrier(fattr);
+}
+
+void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
+{
+	struct nfs_fattr *fattr = hdr->res.fattr;
+	struct inode *inode = hdr->inode;
+
+	if (fattr == NULL)
+		return;
+	spin_lock(&inode->i_lock);
+	nfs_writeback_check_extend(hdr, fattr);
+	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
+	spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
+
 /*
  * This function is called when the WRITE call is complete.
  */
@@ -1605,11 +1630,8 @@
 		req = nfs_list_entry(page_list->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
-		if (!cinfo->dreq) {
-			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-			dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-				     BDI_RECLAIMABLE);
-		}
+		if (!cinfo->dreq)
+			nfs_clear_page_commit(req->wb_page);
 		nfs_unlock_and_release_request(req);
 	}
 }
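The core of the new nfs_writeback_check_extend() above is a size comparison: the post-write attributes are only allowed to take precedence (the kernel then sets an attribute barrier) when the reported size is exactly the end of this write and not smaller than the size already cached. A condensed sketch of just that predicate, with the attribute-validity check and the barrier call elided:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when this WRITE is the one that extended the file to the size the
 * server reported, so the reported attributes should win. */
static bool write_extends_file(uint64_t offset, uint32_t count,
			       uint64_t reported_size, uint64_t cached_isize)
{
	if (offset + count != reported_size)
		return false;
	if (reported_size < cached_isize)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", write_extends_file(4096, 4096, 8192, 4096));  /* 1 */
	printf("%d\n", write_extends_file(4096, 4096, 9000, 4096));  /* 0 */
	printf("%d\n", write_extends_file(4096, 4096, 8192, 16384)); /* 0 */
	return 0;
}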
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index cc6a760..1c307f0 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -583,7 +583,7 @@
 	if (status)
 		return status;
 	status = -ENOTDIR;
-	if (S_ISDIR(path.dentry->d_inode->i_mode)) {
+	if (d_is_dir(path.dentry)) {
 		strcpy(user_recovery_dirname, recdir);
 		status = 0;
 	}
@@ -1426,7 +1426,7 @@
 	nn->client_tracking_ops = &nfsd4_legacy_tracking_ops;
 	status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
 	if (!status) {
-		status = S_ISDIR(path.dentry->d_inode->i_mode);
+		status = d_is_dir(path.dentry);
 		path_put(&path);
 		if (status)
 			goto do_init;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f6b2a09..d2f2c37 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1638,7 +1638,7 @@
 		nfs4_put_stid(&dp->dl_stid);
 	}
 	while (!list_empty(&clp->cl_revoked)) {
-		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
+		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
 		list_del_init(&dp->dl_recall_lru);
 		nfs4_put_stid(&dp->dl_stid);
 	}
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 965b478..e9fa966 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -114,8 +114,8 @@
 	 * We're exposing only the directories and symlinks that have to be
 	 * traversed on the way to real exports:
 	 */
-	if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) &&
-		     !S_ISLNK(dentry->d_inode->i_mode)))
+	if (unlikely(!d_is_dir(dentry) &&
+		     !d_is_symlink(dentry)))
 		return nfserr_stale;
 	/*
 	 * A pseudoroot export gives permission to access only one
@@ -259,7 +259,7 @@
 		goto out;
 	}
 
-	if (S_ISDIR(dentry->d_inode->i_mode) &&
+	if (d_is_dir(dentry) &&
 			(dentry->d_flags & DCACHE_DISCONNECTED)) {
 		printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n",
 				dentry);
@@ -414,7 +414,7 @@
 {
 	fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino);
 	fh->ofh_generation = dentry->d_inode->i_generation;
-	if (S_ISDIR(dentry->d_inode->i_mode) ||
+	if (d_is_dir(dentry) ||
 	    (exp->ex_flags & NFSEXP_NOSUBTREECHECK))
 		fh->ofh_dirino = 0;
 }
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 5685c67..3685265 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -615,9 +615,9 @@
 	export = fhp->fh_export;
 	dentry = fhp->fh_dentry;
 
-	if (S_ISREG(dentry->d_inode->i_mode))
+	if (d_is_reg(dentry))
 		map = nfs3_regaccess;
-	else if (S_ISDIR(dentry->d_inode->i_mode))
+	else if (d_is_dir(dentry))
 		map = nfs3_diraccess;
 	else
 		map = nfs3_anyaccess;
@@ -1402,7 +1402,7 @@
 
 		switch (createmode) {
 		case NFS3_CREATE_UNCHECKED:
-			if (! S_ISREG(dchild->d_inode->i_mode))
+			if (! d_is_reg(dchild))
 				goto out;
 			else if (truncp) {
 				/* in nfsv4, we need to treat this case a little
@@ -1615,7 +1615,7 @@
 	if (err)
 		goto out;
 	err = nfserr_isdir;
-	if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode))
+	if (d_is_dir(tfhp->fh_dentry))
 		goto out;
 	err = nfserr_perm;
 	if (!len)
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index b2e3ff3..ecdbae1 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -31,6 +31,8 @@
 #include "alloc.h"
 #include "dat.h"
 
+static void __nilfs_btree_init(struct nilfs_bmap *bmap);
+
 static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
 {
 	struct nilfs_btree_path *path;
@@ -368,6 +370,34 @@
 	return ret;
 }
 
+/**
+ * nilfs_btree_root_broken - verify consistency of btree root node
+ * @node: btree root node to be examined
+ * @ino: inode number
+ *
+ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ */
+static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
+				   unsigned long ino)
+{
+	int level, flags, nchildren;
+	int ret = 0;
+
+	level = nilfs_btree_node_get_level(node);
+	flags = nilfs_btree_node_get_flags(node);
+	nchildren = nilfs_btree_node_get_nchildren(node);
+
+	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
+		     level > NILFS_BTREE_LEVEL_MAX ||
+		     nchildren < 0 ||
+		     nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
+		pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
+			ino, level, flags, nchildren);
+		ret = 1;
+	}
+	return ret;
+}
+
 int nilfs_btree_broken_node_block(struct buffer_head *bh)
 {
 	int ret;
@@ -1713,7 +1743,7 @@
 
 	/* convert and insert */
 	dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
-	nilfs_btree_init(btree);
+	__nilfs_btree_init(btree);
 	if (nreq != NULL) {
 		nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
 		nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
@@ -2294,12 +2324,23 @@
 	.bop_gather_data	=	NULL,
 };
 
-int nilfs_btree_init(struct nilfs_bmap *bmap)
+static void __nilfs_btree_init(struct nilfs_bmap *bmap)
 {
 	bmap->b_ops = &nilfs_btree_ops;
 	bmap->b_nchildren_per_block =
 		NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
-	return 0;
+}
+
+int nilfs_btree_init(struct nilfs_bmap *bmap)
+{
+	int ret = 0;
+
+	__nilfs_btree_init(bmap);
+
+	if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
+				    bmap->b_inode->i_ino))
+		ret = -EIO;
+	return ret;
 }
 
 void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
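nilfs_btree_root_broken() above is a plain range check on the root node before the bmap is ever used. The same shape in standalone form, with illustrative bounds rather than the real NILFS on-disk limits:

#include <stdio.h>

/* Illustrative bounds; the kernel derives these from the on-disk format. */
#define BTREE_LEVEL_NODE_MIN     1
#define BTREE_LEVEL_MAX          14
#define BTREE_ROOT_NCHILDREN_MAX 3

/* Reject a root node whose level or child count is outside the legal
 * range, instead of letting later lookups walk corrupted metadata. */
static int btree_root_broken(int level, int nchildren)
{
	if (level < BTREE_LEVEL_NODE_MIN ||
	    level > BTREE_LEVEL_MAX ||
	    nchildren < 0 ||
	    nchildren > BTREE_ROOT_NCHILDREN_MAX)
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", btree_root_broken(2, 2));   /* 0: plausible root  */
	printf("%d\n", btree_root_broken(99, 2));  /* 1: corrupted level */
	return 0;
}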
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 469086b..0c3f303 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1907,6 +1907,7 @@
 					     struct the_nilfs *nilfs)
 {
 	struct nilfs_inode_info *ii, *n;
+	int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
 	int defer_iput = false;
 
 	spin_lock(&nilfs->ns_inode_lock);
@@ -1919,10 +1920,10 @@
 		brelse(ii->i_bh);
 		ii->i_bh = NULL;
 		list_del_init(&ii->i_dirty);
-		if (!ii->vfs_inode.i_nlink) {
+		if (!ii->vfs_inode.i_nlink || during_mount) {
 			/*
-			 * Defer calling iput() to avoid a deadlock
-			 * over I_SYNC flag for inodes with i_nlink == 0
+			 * Defer calling iput() to avoid deadlocks if
+			 * i_nlink == 0 or mount is not yet finished.
 			 */
 			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
 			defer_iput = true;
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 51ceb81..d2f97ec 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -115,8 +115,8 @@
 		return false;
 
 	/* sorry, fanotify only gives a damn about files and dirs */
-	if (!S_ISREG(path->dentry->d_inode->i_mode) &&
-	    !S_ISDIR(path->dentry->d_inode->i_mode))
+	if (!d_is_reg(path->dentry) &&
+	    !d_can_lookup(path->dentry))
 		return false;
 
 	if (inode_mark && vfsmnt_mark) {
@@ -139,11 +139,12 @@
 		BUG();
 	}
 
-	if (S_ISDIR(path->dentry->d_inode->i_mode) &&
+	if (d_is_dir(path->dentry) &&
 	    !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
 		return false;
 
-	if (event_mask & marks_mask & ~marks_ignored_mask)
+	if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
+				 ~marks_ignored_mask)
 		return true;
 
 	return false;
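The fanotify.c hunk above additionally masks the event with the set of events fanotify is allowed to report at all, so stray bits in a mark's mask can no longer produce an event on their own. A toy version of the corrected predicate with made-up event bits (the real masks live in linux/fanotify.h):

#include <stdio.h>

#define EV_ACCESS            0x01
#define EV_MODIFY            0x02
#define EV_OPEN              0x20
#define EV_INTERNAL          0x40000000   /* never meant to reach userspace */
#define ALL_OUTGOING_EVENTS  (EV_ACCESS | EV_MODIFY | EV_OPEN)

/* Deliver only events that survive the outgoing set, the mark's interest
 * mask and its ignore mask. */
static int should_send_event(unsigned int event_mask, unsigned int marks_mask,
			     unsigned int marks_ignored_mask)
{
	return (event_mask & ALL_OUTGOING_EVENTS & marks_mask &
		~marks_ignored_mask) != 0;
}

int main(void)
{
	printf("%d\n", should_send_event(EV_OPEN, EV_OPEN, 0));        /* 1 */
	printf("%d\n", should_send_event(EV_OPEN, EV_OPEN, EV_OPEN));  /* 0 */
	printf("%d\n", should_send_event(EV_INTERNAL, ~0u, 0));        /* 0 */
	return 0;
}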
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 8490c64..460c6c3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -502,7 +502,7 @@
 
 static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb)
 {
-	if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO)
+	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO)
 		return 1;
 	return 0;
 }
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 20e37a3..db64ce2 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -102,11 +102,11 @@
 					 | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \
 					 | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \
 					 | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG	\
-					 | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO)
+					 | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \
+					 | OCFS2_FEATURE_INCOMPAT_APPEND_DIO)
 #define OCFS2_FEATURE_RO_COMPAT_SUPP	(OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \
 					 | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \
-					 | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \
-					 | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO)
+					 | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)
 
 /*
  * Heartbeat-only devices are missing journals and other files.  The
@@ -179,6 +179,11 @@
 #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO	0x4000
 
 /*
+ * Append Direct IO support
+ */
+#define OCFS2_FEATURE_INCOMPAT_APPEND_DIO	0x8000
+
+/*
  * backup superblock flag is used to indicate that this volume
  * has backup superblocks.
  */
@@ -200,10 +205,6 @@
 #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA	0x0002
 #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA	0x0004
 
-/*
- * Append Direct IO support
- */
-#define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO	0x0008
 
 /* The byte offset of the first backup block will be 1G.
  * The following will be 4G, 16G, 64G, 256G and 1T.
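The point of moving APPEND_DIO from the ro-compat set to the incompat set above follows from the usual feature-bit policy: an unknown incompat bit refuses the mount outright, while an unknown ro-compat bit only forces a read-only mount. A generic sketch of that policy; the bit values and support masks here are made up and are not the real ocfs2 ones:

#include <stdio.h>

#define FEATURE_INCOMPAT_SUPP   0x00ff
#define FEATURE_RO_COMPAT_SUPP  0x000f

struct sb_features {
	unsigned int incompat;
	unsigned int ro_compat;
};

/* Classic compat-bit handling at mount time. */
static int check_features(const struct sb_features *f, int readonly)
{
	if (f->incompat & ~FEATURE_INCOMPAT_SUPP)
		return -1;                    /* refuse the mount entirely    */
	if (!readonly && (f->ro_compat & ~FEATURE_RO_COMPAT_SUPP))
		return -2;                    /* allow read-only mounts only  */
	return 0;
}

int main(void)
{
	struct sb_features f = { .incompat = 0x8000 }; /* unknown incompat bit */

	printf("%d\n", check_features(&f, 0)); /* -1: cannot mount at all */
	return 0;
}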
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index ea10a87..24f6404 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -191,7 +191,6 @@
 		ovl_set_timestamps(upperdentry, stat);
 
 	return err;
-
 }
 
 static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
@@ -385,7 +384,7 @@
 		struct kstat stat;
 		enum ovl_path_type type = ovl_path_type(dentry);
 
-		if (type != OVL_PATH_LOWER)
+		if (OVL_TYPE_UPPER(type))
 			break;
 
 		next = dget(dentry);
@@ -394,7 +393,7 @@
 			parent = dget_parent(next);
 
 			type = ovl_path_type(parent);
-			if (type != OVL_PATH_LOWER)
+			if (OVL_TYPE_UPPER(type))
 				break;
 
 			dput(next);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 8ffc4b9..d139405 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -19,7 +19,7 @@
 	int err;
 
 	dget(wdentry);
-	if (S_ISDIR(wdentry->d_inode->i_mode))
+	if (d_is_dir(wdentry))
 		err = ovl_do_rmdir(wdir, wdentry);
 	else
 		err = ovl_do_unlink(wdir, wdentry);
@@ -118,14 +118,14 @@
 
 static int ovl_set_opaque(struct dentry *upperdentry)
 {
-	return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0);
+	return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
 }
 
 static void ovl_remove_opaque(struct dentry *upperdentry)
 {
 	int err;
 
-	err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr);
+	err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE);
 	if (err) {
 		pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n",
 			upperdentry->d_name.name, err);
@@ -152,7 +152,7 @@
 	 * correct link count.  nlink=1 seems to pacify 'find' and
 	 * other utilities.
 	 */
-	if (type == OVL_PATH_MERGE)
+	if (OVL_TYPE_MERGE(type))
 		stat->nlink = 1;
 
 	return 0;
@@ -506,7 +506,7 @@
 	struct dentry *opaquedir = NULL;
 	int err;
 
-	if (is_dir) {
+	if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
 		opaquedir = ovl_check_empty_and_clear(dentry);
 		err = PTR_ERR(opaquedir);
 		if (IS_ERR(opaquedir))
@@ -630,7 +630,7 @@
 		goto out_drop_write;
 
 	type = ovl_path_type(dentry);
-	if (type == OVL_PATH_PURE_UPPER) {
+	if (OVL_TYPE_PURE_UPPER(type)) {
 		err = ovl_remove_upper(dentry, is_dir);
 	} else {
 		const struct cred *old_cred;
@@ -693,7 +693,7 @@
 	bool new_create = false;
 	bool cleanup_whiteout = false;
 	bool overwrite = !(flags & RENAME_EXCHANGE);
-	bool is_dir = S_ISDIR(old->d_inode->i_mode);
+	bool is_dir = d_is_dir(old);
 	bool new_is_dir = false;
 	struct dentry *opaquedir = NULL;
 	const struct cred *old_cred = NULL;
@@ -712,7 +712,7 @@
 	/* Don't copy up directory trees */
 	old_type = ovl_path_type(old);
 	err = -EXDEV;
-	if ((old_type == OVL_PATH_LOWER || old_type == OVL_PATH_MERGE) && is_dir)
+	if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir)
 		goto out;
 
 	if (new->d_inode) {
@@ -720,30 +720,30 @@
 		if (err)
 			goto out;
 
-		if (S_ISDIR(new->d_inode->i_mode))
+		if (d_is_dir(new))
 			new_is_dir = true;
 
 		new_type = ovl_path_type(new);
 		err = -EXDEV;
-		if (!overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir)
+		if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir)
 			goto out;
 
 		err = 0;
-		if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) {
+		if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) {
 			if (ovl_dentry_lower(old)->d_inode ==
 			    ovl_dentry_lower(new)->d_inode)
 				goto out;
 		}
-		if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) {
+		if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) {
 			if (ovl_dentry_upper(old)->d_inode ==
 			    ovl_dentry_upper(new)->d_inode)
 				goto out;
 		}
 	} else {
 		if (ovl_dentry_is_opaque(new))
-			new_type = OVL_PATH_UPPER;
+			new_type = __OVL_PATH_UPPER;
 		else
-			new_type = OVL_PATH_PURE_UPPER;
+			new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE;
 	}
 
 	err = ovl_want_write(old);
@@ -763,8 +763,8 @@
 			goto out_drop_write;
 	}
 
-	old_opaque = old_type != OVL_PATH_PURE_UPPER;
-	new_opaque = new_type != OVL_PATH_PURE_UPPER;
+	old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
+	new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
 
 	if (old_opaque || new_opaque) {
 		err = -ENOMEM;
@@ -787,7 +787,7 @@
 		old_cred = override_creds(override_cred);
 	}
 
-	if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) {
+	if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
 		opaquedir = ovl_check_empty_and_clear(new);
 		err = PTR_ERR(opaquedir);
 		if (IS_ERR(opaquedir)) {
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 07d74b2..04f1248 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -205,7 +205,7 @@
 
 static bool ovl_is_private_xattr(const char *name)
 {
-	return strncmp(name, "trusted.overlay.", 14) == 0;
+	return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
 }
 
 int ovl_setxattr(struct dentry *dentry, const char *name,
@@ -238,7 +238,10 @@
 static bool ovl_need_xattr_filter(struct dentry *dentry,
 				  enum ovl_path_type type)
 {
-	return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode);
+	if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
+		return S_ISDIR(dentry->d_inode->i_mode);
+	else
+		return false;
 }
 
 ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
@@ -299,7 +302,7 @@
 	if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
 		goto out_drop_write;
 
-	if (type == OVL_PATH_LOWER) {
+	if (!OVL_TYPE_UPPER(type)) {
 		err = vfs_getxattr(realpath.dentry, name, NULL, 0);
 		if (err < 0)
 			goto out_drop_write;
@@ -321,7 +324,7 @@
 static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
 				  struct dentry *realdentry)
 {
-	if (type != OVL_PATH_LOWER)
+	if (OVL_TYPE_UPPER(type))
 		return false;
 
 	if (special_file(realdentry->d_inode->i_mode))
@@ -430,5 +433,4 @@
 	}
 
 	return inode;
-
 }
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 814bed3..17ac5af 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -12,13 +12,20 @@
 struct ovl_entry;
 
 enum ovl_path_type {
-	OVL_PATH_PURE_UPPER,
-	OVL_PATH_UPPER,
-	OVL_PATH_MERGE,
-	OVL_PATH_LOWER,
+	__OVL_PATH_PURE		= (1 << 0),
+	__OVL_PATH_UPPER	= (1 << 1),
+	__OVL_PATH_MERGE	= (1 << 2),
 };
 
-extern const char *ovl_opaque_xattr;
+#define OVL_TYPE_UPPER(type)	((type) & __OVL_PATH_UPPER)
+#define OVL_TYPE_MERGE(type)	((type) & __OVL_PATH_MERGE)
+#define OVL_TYPE_PURE_UPPER(type) ((type) & __OVL_PATH_PURE)
+#define OVL_TYPE_MERGE_OR_LOWER(type) \
+	(OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
+
+#define OVL_XATTR_PRE_NAME "trusted.overlay."
+#define OVL_XATTR_PRE_LEN  16
+#define OVL_XATTR_OPAQUE   OVL_XATTR_PRE_NAME"opaque"
 
 static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -130,6 +137,7 @@
 void ovl_path_upper(struct dentry *dentry, struct path *path);
 void ovl_path_lower(struct dentry *dentry, struct path *path);
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
 struct dentry *ovl_dentry_upper(struct dentry *dentry);
 struct dentry *ovl_dentry_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_real(struct dentry *dentry);
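The overlayfs.h hunk above turns the path type into independent bits so that, with multiple lower layers, a dentry can be any meaningful combination of upper, merged and pure. Below is a standalone rendering of those bits together with a rough analogue of the reworked ovl_path_type() from super.c, with the dentry state reduced to plain flags:

#include <stdio.h>

#define PATH_PURE   (1 << 0)
#define PATH_UPPER  (1 << 1)
#define PATH_MERGE  (1 << 2)

#define TYPE_UPPER(t)          ((t) & PATH_UPPER)
#define TYPE_MERGE(t)          ((t) & PATH_MERGE)
#define TYPE_PURE_UPPER(t)     ((t) & PATH_PURE)
#define TYPE_MERGE_OR_LOWER(t) (TYPE_MERGE(t) || !TYPE_UPPER(t))

/* Same decision tree as the reworked ovl_path_type(). */
static unsigned int path_type(int has_upper, int numlower, int opaque, int is_dir)
{
	unsigned int type = 0;

	if (has_upper) {
		type = PATH_UPPER;
		if (numlower) {
			if (is_dir)
				type |= PATH_MERGE;
		} else if (!opaque) {
			type |= PATH_PURE;
		}
	} else if (numlower > 1) {
		type |= PATH_MERGE;
	}
	return type;
}

int main(void)
{
	unsigned int t = path_type(1, 2, 0, 1);   /* upper dir over two lowers */

	printf("upper=%u merge=%u pure=%u merge_or_lower=%d\n",
	       !!TYPE_UPPER(t), !!TYPE_MERGE(t), !!TYPE_PURE_UPPER(t),
	       TYPE_MERGE_OR_LOWER(t));
	return 0;
}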
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index c020599..907870e 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -24,7 +24,6 @@
 	struct list_head l_node;
 	struct rb_node node;
 	bool is_whiteout;
-	bool is_cursor;
 	char name[];
 };
 
@@ -40,6 +39,7 @@
 	struct rb_root root;
 	struct list_head *list;
 	struct list_head middle;
+	struct dentry *dir;
 	int count;
 	int err;
 };
@@ -48,7 +48,7 @@
 	bool is_real;
 	bool is_upper;
 	struct ovl_dir_cache *cache;
-	struct ovl_cache_entry cursor;
+	struct list_head *cursor;
 	struct file *realfile;
 	struct file *upperfile;
 };
@@ -79,23 +79,49 @@
 	return NULL;
 }
 
-static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
+static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
+						   const char *name, int len,
 						   u64 ino, unsigned int d_type)
 {
 	struct ovl_cache_entry *p;
 	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);
 
 	p = kmalloc(size, GFP_KERNEL);
-	if (p) {
-		memcpy(p->name, name, len);
-		p->name[len] = '\0';
-		p->len = len;
-		p->type = d_type;
-		p->ino = ino;
-		p->is_whiteout = false;
-		p->is_cursor = false;
-	}
+	if (!p)
+		return NULL;
 
+	memcpy(p->name, name, len);
+	p->name[len] = '\0';
+	p->len = len;
+	p->type = d_type;
+	p->ino = ino;
+	p->is_whiteout = false;
+
+	if (d_type == DT_CHR) {
+		struct dentry *dentry;
+		const struct cred *old_cred;
+		struct cred *override_cred;
+
+		override_cred = prepare_creds();
+		if (!override_cred) {
+			kfree(p);
+			return NULL;
+		}
+
+		/*
+		 * CAP_DAC_OVERRIDE for lookup
+		 */
+		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+		old_cred = override_creds(override_cred);
+
+		dentry = lookup_one_len(name, dir, len);
+		if (!IS_ERR(dentry)) {
+			p->is_whiteout = ovl_is_whiteout(dentry);
+			dput(dentry);
+		}
+		revert_creds(old_cred);
+		put_cred(override_cred);
+	}
 	return p;
 }
 
@@ -122,7 +148,7 @@
 			return 0;
 	}
 
-	p = ovl_cache_entry_new(name, len, ino, d_type);
+	p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
 	if (p == NULL)
 		return -ENOMEM;
 
@@ -143,7 +169,7 @@
 	if (p) {
 		list_move_tail(&p->l_node, &rdd->middle);
 	} else {
-		p = ovl_cache_entry_new(name, namelen, ino, d_type);
+		p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
 		if (p == NULL)
 			rdd->err = -ENOMEM;
 		else
@@ -168,7 +194,6 @@
 {
 	struct ovl_dir_cache *cache = od->cache;
 
-	list_del_init(&od->cursor.l_node);
 	WARN_ON(cache->refcount <= 0);
 	cache->refcount--;
 	if (!cache->refcount) {
@@ -204,6 +229,7 @@
 	if (IS_ERR(realfile))
 		return PTR_ERR(realfile);
 
+	rdd->dir = realpath->dentry;
 	rdd->ctx.pos = 0;
 	do {
 		rdd->count = 0;
@@ -227,108 +253,58 @@
 	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
 		ovl_cache_put(od, dentry);
 		od->cache = NULL;
+		od->cursor = NULL;
 	}
-	WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
-	if (od->is_real && type == OVL_PATH_MERGE)
+	WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
+	if (od->is_real && OVL_TYPE_MERGE(type))
 		od->is_real = false;
 }
 
-static int ovl_dir_mark_whiteouts(struct dentry *dir,
-				  struct ovl_readdir_data *rdd)
-{
-	struct ovl_cache_entry *p;
-	struct dentry *dentry;
-	const struct cred *old_cred;
-	struct cred *override_cred;
-
-	override_cred = prepare_creds();
-	if (!override_cred) {
-		ovl_cache_free(rdd->list);
-		return -ENOMEM;
-	}
-
-	/*
-	 * CAP_DAC_OVERRIDE for lookup
-	 */
-	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-	old_cred = override_creds(override_cred);
-
-	mutex_lock(&dir->d_inode->i_mutex);
-	list_for_each_entry(p, rdd->list, l_node) {
-		if (p->is_cursor)
-			continue;
-
-		if (p->type != DT_CHR)
-			continue;
-
-		dentry = lookup_one_len(p->name, dir, p->len);
-		if (IS_ERR(dentry))
-			continue;
-
-		p->is_whiteout = ovl_is_whiteout(dentry);
-		dput(dentry);
-	}
-	mutex_unlock(&dir->d_inode->i_mutex);
-
-	revert_creds(old_cred);
-	put_cred(override_cred);
-
-	return 0;
-}
-
 static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
 {
 	int err;
-	struct path lowerpath;
-	struct path upperpath;
+	struct path realpath;
 	struct ovl_readdir_data rdd = {
 		.ctx.actor = ovl_fill_merge,
 		.list = list,
 		.root = RB_ROOT,
 		.is_merge = false,
 	};
+	int idx, next;
 
-	ovl_path_lower(dentry, &lowerpath);
-	ovl_path_upper(dentry, &upperpath);
+	for (idx = 0; idx != -1; idx = next) {
+		next = ovl_path_next(idx, dentry, &realpath);
 
-	if (upperpath.dentry) {
-		err = ovl_dir_read(&upperpath, &rdd);
-		if (err)
-			goto out;
-
-		if (lowerpath.dentry) {
-			err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
+		if (next != -1) {
+			err = ovl_dir_read(&realpath, &rdd);
 			if (err)
-				goto out;
+				break;
+		} else {
+			/*
+			 * Insert lowest layer entries before upper ones, this
+			 * allows offsets to be reasonably constant
+			 */
+			list_add(&rdd.middle, rdd.list);
+			rdd.is_merge = true;
+			err = ovl_dir_read(&realpath, &rdd);
+			list_del(&rdd.middle);
 		}
 	}
-	if (lowerpath.dentry) {
-		/*
-		 * Insert lowerpath entries before upperpath ones, this allows
-		 * offsets to be reasonably constant
-		 */
-		list_add(&rdd.middle, rdd.list);
-		rdd.is_merge = true;
-		err = ovl_dir_read(&lowerpath, &rdd);
-		list_del(&rdd.middle);
-	}
-out:
 	return err;
 }
 
 static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
 {
-	struct ovl_cache_entry *p;
+	struct list_head *p;
 	loff_t off = 0;
 
-	list_for_each_entry(p, &od->cache->entries, l_node) {
-		if (p->is_cursor)
-			continue;
+	list_for_each(p, &od->cache->entries) {
 		if (off >= pos)
 			break;
 		off++;
 	}
-	list_move_tail(&od->cursor.l_node, &p->l_node);
+	/* Cursor is safe since the cache is stable */
+	od->cursor = p;
 }
 
 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
@@ -367,6 +343,7 @@
 {
 	struct ovl_dir_file *od = file->private_data;
 	struct dentry *dentry = file->f_path.dentry;
+	struct ovl_cache_entry *p;
 
 	if (!ctx->pos)
 		ovl_dir_reset(file);
@@ -385,19 +362,13 @@
 		ovl_seek_cursor(od, ctx->pos);
 	}
 
-	while (od->cursor.l_node.next != &od->cache->entries) {
-		struct ovl_cache_entry *p;
-
-		p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node);
-		/* Skip cursors */
-		if (!p->is_cursor) {
-			if (!p->is_whiteout) {
-				if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
-					break;
-			}
-			ctx->pos++;
-		}
-		list_move(&od->cursor.l_node, &p->l_node);
+	while (od->cursor != &od->cache->entries) {
+		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
+		if (!p->is_whiteout)
+			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
+				break;
+		od->cursor = p->l_node.next;
+		ctx->pos++;
 	}
 	return 0;
 }
@@ -452,7 +423,7 @@
 	/*
 	 * Need to check if we started out being a lower dir, but got copied up
 	 */
-	if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
+	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
 		struct inode *inode = file_inode(file);
 
 		realfile = lockless_dereference(od->upperfile);
@@ -516,11 +487,9 @@
 		kfree(od);
 		return PTR_ERR(realfile);
 	}
-	INIT_LIST_HEAD(&od->cursor.l_node);
 	od->realfile = realfile;
-	od->is_real = (type != OVL_PATH_MERGE);
-	od->is_upper = (type != OVL_PATH_LOWER);
-	od->cursor.is_cursor = true;
+	od->is_real = !OVL_TYPE_MERGE(type);
+	od->is_upper = OVL_TYPE_UPPER(type);
 	file->private_data = od;
 
 	return 0;
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index f16d318..5f0d199 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -35,7 +35,8 @@
 /* private information held for overlayfs's superblock */
 struct ovl_fs {
 	struct vfsmount *upper_mnt;
-	struct vfsmount *lower_mnt;
+	unsigned numlower;
+	struct vfsmount **lower_mnt;
 	struct dentry *workdir;
 	long lower_namelen;
 	/* pathnames of lower and upper dirs, for show_options */
@@ -47,7 +48,6 @@
 /* private information held for every overlayfs dentry */
 struct ovl_entry {
 	struct dentry *__upperdentry;
-	struct dentry *lowerdentry;
 	struct ovl_dir_cache *cache;
 	union {
 		struct {
@@ -56,30 +56,36 @@
 		};
 		struct rcu_head rcu;
 	};
+	unsigned numlower;
+	struct path lowerstack[];
 };
 
-const char *ovl_opaque_xattr = "trusted.overlay.opaque";
+#define OVL_MAX_STACK 500
 
+static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe)
+{
+	return oe->numlower ? oe->lowerstack[0].dentry : NULL;
+}
 
 enum ovl_path_type ovl_path_type(struct dentry *dentry)
 {
 	struct ovl_entry *oe = dentry->d_fsdata;
+	enum ovl_path_type type = 0;
 
 	if (oe->__upperdentry) {
-		if (oe->lowerdentry) {
+		type = __OVL_PATH_UPPER;
+
+		if (oe->numlower) {
 			if (S_ISDIR(dentry->d_inode->i_mode))
-				return OVL_PATH_MERGE;
-			else
-				return OVL_PATH_UPPER;
-		} else {
-			if (oe->opaque)
-				return OVL_PATH_UPPER;
-			else
-				return OVL_PATH_PURE_UPPER;
+				type |= __OVL_PATH_MERGE;
+		} else if (!oe->opaque) {
+			type |= __OVL_PATH_PURE;
 		}
 	} else {
-		return OVL_PATH_LOWER;
+		if (oe->numlower > 1)
+			type |= __OVL_PATH_MERGE;
 	}
+	return type;
 }
 
 static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
@@ -98,10 +104,9 @@
 
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
 {
-
 	enum ovl_path_type type = ovl_path_type(dentry);
 
-	if (type == OVL_PATH_LOWER)
+	if (!OVL_TYPE_UPPER(type))
 		ovl_path_lower(dentry, path);
 	else
 		ovl_path_upper(dentry, path);
@@ -120,7 +125,7 @@
 {
 	struct ovl_entry *oe = dentry->d_fsdata;
 
-	return oe->lowerdentry;
+	return __ovl_dentry_lower(oe);
 }
 
 struct dentry *ovl_dentry_real(struct dentry *dentry)
@@ -130,7 +135,7 @@
 
 	realdentry = ovl_upperdentry_dereference(oe);
 	if (!realdentry)
-		realdentry = oe->lowerdentry;
+		realdentry = __ovl_dentry_lower(oe);
 
 	return realdentry;
 }
@@ -143,7 +148,7 @@
 	if (realdentry) {
 		*is_upper = true;
 	} else {
-		realdentry = oe->lowerdentry;
+		realdentry = __ovl_dentry_lower(oe);
 		*is_upper = false;
 	}
 	return realdentry;
@@ -165,11 +170,9 @@
 
 void ovl_path_lower(struct dentry *dentry, struct path *path)
 {
-	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
 	struct ovl_entry *oe = dentry->d_fsdata;
 
-	path->mnt = ofs->lower_mnt;
-	path->dentry = oe->lowerdentry;
+	*path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
 }
 
 int ovl_want_write(struct dentry *dentry)
@@ -249,7 +252,7 @@
 	if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr)
 		return false;
 
-	res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1);
+	res = inode->i_op->getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
 	if (res == 1 && val == 'y')
 		return true;
 
@@ -261,8 +264,11 @@
 	struct ovl_entry *oe = dentry->d_fsdata;
 
 	if (oe) {
+		unsigned int i;
+
 		dput(oe->__upperdentry);
-		dput(oe->lowerdentry);
+		for (i = 0; i < oe->numlower; i++)
+			dput(oe->lowerstack[i].dentry);
 		kfree_rcu(oe, rcu);
 	}
 }
@@ -271,9 +277,15 @@
 	.d_release = ovl_dentry_release,
 };
 
-static struct ovl_entry *ovl_alloc_entry(void)
+static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
 {
-	return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL);
+	size_t size = offsetof(struct ovl_entry, lowerstack[numlower]);
+	struct ovl_entry *oe = kzalloc(size, GFP_KERNEL);
+
+	if (oe)
+		oe->numlower = numlower;
+
+	return oe;
 }
 
 static inline struct dentry *ovl_lookup_real(struct dentry *dir,
@@ -295,82 +307,154 @@
 	return dentry;
 }
 
+/*
+ * Returns next layer in stack starting from top.
+ * Returns -1 if this is the last layer.
+ */
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	BUG_ON(idx < 0);
+	if (idx == 0) {
+		ovl_path_upper(dentry, path);
+		if (path->dentry)
+			return oe->numlower ? 1 : -1;
+		idx++;
+	}
+	BUG_ON(idx > oe->numlower);
+	*path = oe->lowerstack[idx - 1];
+
+	return (idx < oe->numlower) ? idx + 1 : -1;
+}
+
 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 			  unsigned int flags)
 {
 	struct ovl_entry *oe;
-	struct dentry *upperdir;
-	struct dentry *lowerdir;
-	struct dentry *upperdentry = NULL;
-	struct dentry *lowerdentry = NULL;
+	struct ovl_entry *poe = dentry->d_parent->d_fsdata;
+	struct path *stack = NULL;
+	struct dentry *upperdir, *upperdentry = NULL;
+	unsigned int ctr = 0;
 	struct inode *inode = NULL;
+	bool upperopaque = false;
+	struct dentry *this, *prev = NULL;
+	unsigned int i;
 	int err;
 
-	err = -ENOMEM;
-	oe = ovl_alloc_entry();
-	if (!oe)
-		goto out;
-
-	upperdir = ovl_dentry_upper(dentry->d_parent);
-	lowerdir = ovl_dentry_lower(dentry->d_parent);
-
+	upperdir = ovl_upperdentry_dereference(poe);
 	if (upperdir) {
-		upperdentry = ovl_lookup_real(upperdir, &dentry->d_name);
-		err = PTR_ERR(upperdentry);
-		if (IS_ERR(upperdentry))
-			goto out_put_dir;
+		this = ovl_lookup_real(upperdir, &dentry->d_name);
+		err = PTR_ERR(this);
+		if (IS_ERR(this))
+			goto out;
 
-		if (lowerdir && upperdentry) {
-			if (ovl_is_whiteout(upperdentry)) {
-				dput(upperdentry);
-				upperdentry = NULL;
-				oe->opaque = true;
-			} else if (ovl_is_opaquedir(upperdentry)) {
-				oe->opaque = true;
+		if (this) {
+			if (ovl_is_whiteout(this)) {
+				dput(this);
+				this = NULL;
+				upperopaque = true;
+			} else if (poe->numlower && ovl_is_opaquedir(this)) {
+				upperopaque = true;
 			}
 		}
-	}
-	if (lowerdir && !oe->opaque) {
-		lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name);
-		err = PTR_ERR(lowerdentry);
-		if (IS_ERR(lowerdentry))
-			goto out_dput_upper;
+		upperdentry = prev = this;
 	}
 
-	if (lowerdentry && upperdentry &&
-	    (!S_ISDIR(upperdentry->d_inode->i_mode) ||
-	     !S_ISDIR(lowerdentry->d_inode->i_mode))) {
-		dput(lowerdentry);
-		lowerdentry = NULL;
-		oe->opaque = true;
+	if (!upperopaque && poe->numlower) {
+		err = -ENOMEM;
+		stack = kcalloc(poe->numlower, sizeof(struct path), GFP_KERNEL);
+		if (!stack)
+			goto out_put_upper;
 	}
 
-	if (lowerdentry || upperdentry) {
+	for (i = 0; !upperopaque && i < poe->numlower; i++) {
+		bool opaque = false;
+		struct path lowerpath = poe->lowerstack[i];
+
+		this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name);
+		err = PTR_ERR(this);
+		if (IS_ERR(this)) {
+			/*
+			 * If it's positive, then treat ENAMETOOLONG as ENOENT.
+			 */
+			if (err == -ENAMETOOLONG && (upperdentry || ctr))
+				continue;
+			goto out_put;
+		}
+		if (!this)
+			continue;
+		if (ovl_is_whiteout(this)) {
+			dput(this);
+			break;
+		}
+		/*
+		 * Only makes sense to check opaque dir if this is not the
+		 * lowermost layer.
+		 */
+		if (i < poe->numlower - 1 && ovl_is_opaquedir(this))
+			opaque = true;
+
+		if (prev && (!S_ISDIR(prev->d_inode->i_mode) ||
+			     !S_ISDIR(this->d_inode->i_mode))) {
+			/*
+			 * FIXME: check for upper-opaqueness maybe better done
+			 * in remove code.
+			 */
+			if (prev == upperdentry)
+				upperopaque = true;
+			dput(this);
+			break;
+		}
+		/*
+		 * If this is a non-directory then stop here.
+		 */
+		if (!S_ISDIR(this->d_inode->i_mode))
+			opaque = true;
+
+		stack[ctr].dentry = this;
+		stack[ctr].mnt = lowerpath.mnt;
+		ctr++;
+		prev = this;
+		if (opaque)
+			break;
+	}
+
+	oe = ovl_alloc_entry(ctr);
+	err = -ENOMEM;
+	if (!oe)
+		goto out_put;
+
+	if (upperdentry || ctr) {
 		struct dentry *realdentry;
 
-		realdentry = upperdentry ? upperdentry : lowerdentry;
+		realdentry = upperdentry ? upperdentry : stack[0].dentry;
+
 		err = -ENOMEM;
 		inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode,
 				      oe);
 		if (!inode)
-			goto out_dput;
+			goto out_free_oe;
 		ovl_copyattr(realdentry->d_inode, inode);
 	}
 
+	oe->opaque = upperopaque;
 	oe->__upperdentry = upperdentry;
-	oe->lowerdentry = lowerdentry;
-
+	memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
+	kfree(stack);
 	dentry->d_fsdata = oe;
 	d_add(dentry, inode);
 
 	return NULL;
 
-out_dput:
-	dput(lowerdentry);
-out_dput_upper:
-	dput(upperdentry);
-out_put_dir:
+out_free_oe:
 	kfree(oe);
+out_put:
+	for (i = 0; i < ctr; i++)
+		dput(stack[i].dentry);
+	kfree(stack);
+out_put_upper:
+	dput(upperdentry);
 out:
 	return ERR_PTR(err);
 }
@@ -383,10 +467,12 @@
 static void ovl_put_super(struct super_block *sb)
 {
 	struct ovl_fs *ufs = sb->s_fs_info;
+	unsigned i;
 
 	dput(ufs->workdir);
 	mntput(ufs->upper_mnt);
-	mntput(ufs->lower_mnt);
+	for (i = 0; i < ufs->numlower; i++)
+		mntput(ufs->lower_mnt[i]);
 
 	kfree(ufs->config.lowerdir);
 	kfree(ufs->config.upperdir);
@@ -400,7 +486,7 @@
  * @buf: The struct kstatfs to fill in with stats
  *
  * Get the filesystem statistics.  As writes always target the upper layer
- * filesystem pass the statfs to the same filesystem.
+ * filesystem pass the statfs to the upper filesystem (if it exists)
  */
 static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
@@ -409,7 +495,7 @@
 	struct path path;
 	int err;
 
-	ovl_path_upper(root_dentry, &path);
+	ovl_path_real(root_dentry, &path);
 
 	err = vfs_statfs(&path, buf);
 	if (!err) {
@@ -432,8 +518,20 @@
 	struct ovl_fs *ufs = sb->s_fs_info;
 
 	seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
-	seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
-	seq_printf(m, ",workdir=%s", ufs->config.workdir);
+	if (ufs->config.upperdir) {
+		seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
+		seq_printf(m, ",workdir=%s", ufs->config.workdir);
+	}
+	return 0;
+}
+
+static int ovl_remount(struct super_block *sb, int *flags, char *data)
+{
+	struct ovl_fs *ufs = sb->s_fs_info;
+
+	if (!(*flags & MS_RDONLY) && !ufs->upper_mnt)
+		return -EROFS;
+
 	return 0;
 }
 
@@ -441,6 +539,7 @@
 	.put_super	= ovl_put_super,
 	.statfs		= ovl_statfs,
 	.show_options	= ovl_show_options,
+	.remount_fs	= ovl_remount,
 };
 
 enum {
@@ -515,9 +614,19 @@
 			break;
 
 		default:
+			pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
 			return -EINVAL;
 		}
 	}
+
+	/* Workdir is useless in non-upper mount */
+	if (!config->upperdir && config->workdir) {
+		pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
+			config->workdir);
+		kfree(config->workdir);
+		config->workdir = NULL;
+	}
+
 	return 0;
 }
 
@@ -585,24 +694,6 @@
 	}
 }
 
-static int ovl_mount_dir(const char *name, struct path *path)
-{
-	int err;
-	char *tmp = kstrdup(name, GFP_KERNEL);
-
-	if (!tmp)
-		return -ENOMEM;
-
-	ovl_unescape(tmp);
-	err = kern_path(tmp, LOOKUP_FOLLOW, path);
-	if (err) {
-		pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err);
-		err = -EINVAL;
-	}
-	kfree(tmp);
-	return err;
-}
-
 static bool ovl_is_allowed_fs_type(struct dentry *root)
 {
 	const struct dentry_operations *dop = root->d_op;
@@ -622,6 +713,75 @@
 	return true;
 }
 
+static int ovl_mount_dir_noesc(const char *name, struct path *path)
+{
+	int err = -EINVAL;
+
+	if (!*name) {
+		pr_err("overlayfs: empty lowerdir\n");
+		goto out;
+	}
+	err = kern_path(name, LOOKUP_FOLLOW, path);
+	if (err) {
+		pr_err("overlayfs: failed to resolve '%s': %i\n", name, err);
+		goto out;
+	}
+	err = -EINVAL;
+	if (!ovl_is_allowed_fs_type(path->dentry)) {
+		pr_err("overlayfs: filesystem on '%s' not supported\n", name);
+		goto out_put;
+	}
+	if (!S_ISDIR(path->dentry->d_inode->i_mode)) {
+		pr_err("overlayfs: '%s' not a directory\n", name);
+		goto out_put;
+	}
+	return 0;
+
+out_put:
+	path_put(path);
+out:
+	return err;
+}
+
+static int ovl_mount_dir(const char *name, struct path *path)
+{
+	int err = -ENOMEM;
+	char *tmp = kstrdup(name, GFP_KERNEL);
+
+	if (tmp) {
+		ovl_unescape(tmp);
+		err = ovl_mount_dir_noesc(tmp, path);
+		kfree(tmp);
+	}
+	return err;
+}
+
+static int ovl_lower_dir(const char *name, struct path *path, long *namelen,
+			 int *stack_depth)
+{
+	int err;
+	struct kstatfs statfs;
+
+	err = ovl_mount_dir_noesc(name, path);
+	if (err)
+		goto out;
+
+	err = vfs_statfs(path, &statfs);
+	if (err) {
+		pr_err("overlayfs: statfs failed on '%s'\n", name);
+		goto out_put;
+	}
+	*namelen = max(*namelen, statfs.f_namelen);
+	*stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);
+
+	return 0;
+
+out_put:
+	path_put(path);
+out:
+	return err;
+}
+
 /* Workdir should not be subdir of upperdir and vice versa */
 static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
 {
@@ -634,16 +794,39 @@
 	return ok;
 }
 
+static unsigned int ovl_split_lowerdirs(char *str)
+{
+	unsigned int ctr = 1;
+	char *s, *d;
+
+	for (s = d = str;; s++, d++) {
+		if (*s == '\\') {
+			s++;
+		} else if (*s == ':') {
+			*d = '\0';
+			ctr++;
+			continue;
+		}
+		*d = *s;
+		if (!*s)
+			break;
+	}
+	return ctr;
+}
+
 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 {
-	struct path lowerpath;
-	struct path upperpath;
-	struct path workpath;
-	struct inode *root_inode;
+	struct path upperpath = { NULL, NULL };
+	struct path workpath = { NULL, NULL };
 	struct dentry *root_dentry;
 	struct ovl_entry *oe;
 	struct ovl_fs *ufs;
-	struct kstatfs statfs;
+	struct path *stack = NULL;
+	char *lowertmp;
+	char *lower;
+	unsigned int numlower;
+	unsigned int stacklen = 0;
+	unsigned int i;
 	int err;
 
 	err = -ENOMEM;
@@ -655,123 +838,147 @@
 	if (err)
 		goto out_free_config;
 
-	/* FIXME: workdir is not needed for a R/O mount */
 	err = -EINVAL;
-	if (!ufs->config.upperdir || !ufs->config.lowerdir ||
-	    !ufs->config.workdir) {
-		pr_err("overlayfs: missing upperdir or lowerdir or workdir\n");
+	if (!ufs->config.lowerdir) {
+		pr_err("overlayfs: missing 'lowerdir'\n");
 		goto out_free_config;
 	}
 
+	sb->s_stack_depth = 0;
+	if (ufs->config.upperdir) {
+		if (!ufs->config.workdir) {
+			pr_err("overlayfs: missing 'workdir'\n");
+			goto out_free_config;
+		}
+
+		err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
+		if (err)
+			goto out_free_config;
+
+		/* Upper fs should not be r/o */
+		if (upperpath.mnt->mnt_sb->s_flags & MS_RDONLY) {
+			pr_err("overlayfs: upper fs is r/o, try multi-lower layers mount\n");
+			err = -EINVAL;
+			goto out_put_upperpath;
+		}
+
+		err = ovl_mount_dir(ufs->config.workdir, &workpath);
+		if (err)
+			goto out_put_upperpath;
+
+		err = -EINVAL;
+		if (upperpath.mnt != workpath.mnt) {
+			pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
+			goto out_put_workpath;
+		}
+		if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
+			pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
+			goto out_put_workpath;
+		}
+		sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth;
+	}
+	err = -ENOMEM;
+	lowertmp = kstrdup(ufs->config.lowerdir, GFP_KERNEL);
+	if (!lowertmp)
+		goto out_put_workpath;
+
+	err = -EINVAL;
+	stacklen = ovl_split_lowerdirs(lowertmp);
+	if (stacklen > OVL_MAX_STACK) {
+		pr_err("overlayfs: too many lower directories, limit is %d\n",
+		       OVL_MAX_STACK);
+		goto out_free_lowertmp;
+	} else if (!ufs->config.upperdir && stacklen == 1) {
+		pr_err("overlayfs: at least 2 lowerdir are needed while upperdir nonexistent\n");
+		goto out_free_lowertmp;
+	}
+
+	stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL);
+	if (!stack)
+		goto out_free_lowertmp;
+
+	lower = lowertmp;
+	for (numlower = 0; numlower < stacklen; numlower++) {
+		err = ovl_lower_dir(lower, &stack[numlower],
+				    &ufs->lower_namelen, &sb->s_stack_depth);
+		if (err)
+			goto out_put_lowerpath;
+
+		lower = strchr(lower, '\0') + 1;
+	}
+
+	err = -EINVAL;
+	sb->s_stack_depth++;
+	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+		pr_err("overlayfs: maximum fs stacking depth exceeded\n");
+		goto out_put_lowerpath;
+	}
+
+	if (ufs->config.upperdir) {
+		ufs->upper_mnt = clone_private_mount(&upperpath);
+		err = PTR_ERR(ufs->upper_mnt);
+		if (IS_ERR(ufs->upper_mnt)) {
+			pr_err("overlayfs: failed to clone upperpath\n");
+			goto out_put_lowerpath;
+		}
+
+		ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
+		err = PTR_ERR(ufs->workdir);
+		if (IS_ERR(ufs->workdir)) {
+			pr_err("overlayfs: failed to create directory %s/%s\n",
+			       ufs->config.workdir, OVL_WORKDIR_NAME);
+			goto out_put_upper_mnt;
+		}
+	}
+
 	err = -ENOMEM;
-	oe = ovl_alloc_entry();
-	if (oe == NULL)
-		goto out_free_config;
+	ufs->lower_mnt = kcalloc(numlower, sizeof(struct vfsmount *), GFP_KERNEL);
+	if (ufs->lower_mnt == NULL)
+		goto out_put_workdir;
+	for (i = 0; i < numlower; i++) {
+		struct vfsmount *mnt = clone_private_mount(&stack[i]);
 
-	err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
-	if (err)
-		goto out_free_oe;
+		err = PTR_ERR(mnt);
+		if (IS_ERR(mnt)) {
+			pr_err("overlayfs: failed to clone lowerpath\n");
+			goto out_put_lower_mnt;
+		}
+		/*
+		 * Make lower_mnt R/O.  That way fchmod/fchown on lower file
+		 * will fail instead of modifying lower fs.
+		 */
+		mnt->mnt_flags |= MNT_READONLY;
 
-	err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath);
-	if (err)
-		goto out_put_upperpath;
-
-	err = ovl_mount_dir(ufs->config.workdir, &workpath);
-	if (err)
-		goto out_put_lowerpath;
-
-	err = -EINVAL;
-	if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) ||
-	    !S_ISDIR(lowerpath.dentry->d_inode->i_mode) ||
-	    !S_ISDIR(workpath.dentry->d_inode->i_mode)) {
-		pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n");
-		goto out_put_workpath;
+		ufs->lower_mnt[ufs->numlower] = mnt;
+		ufs->numlower++;
 	}
 
-	if (upperpath.mnt != workpath.mnt) {
-		pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
-		goto out_put_workpath;
-	}
-	if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
-		pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
-		goto out_put_workpath;
-	}
-
-	if (!ovl_is_allowed_fs_type(upperpath.dentry)) {
-		pr_err("overlayfs: filesystem of upperdir is not supported\n");
-		goto out_put_workpath;
-	}
-
-	if (!ovl_is_allowed_fs_type(lowerpath.dentry)) {
-		pr_err("overlayfs: filesystem of lowerdir is not supported\n");
-		goto out_put_workpath;
-	}
-
-	err = vfs_statfs(&lowerpath, &statfs);
-	if (err) {
-		pr_err("overlayfs: statfs failed on lowerpath\n");
-		goto out_put_workpath;
-	}
-	ufs->lower_namelen = statfs.f_namelen;
-
-	sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth,
-				lowerpath.mnt->mnt_sb->s_stack_depth) + 1;
-
-	err = -EINVAL;
-	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
-		pr_err("overlayfs: maximum fs stacking depth exceeded\n");
-		goto out_put_workpath;
-	}
-
-	ufs->upper_mnt = clone_private_mount(&upperpath);
-	err = PTR_ERR(ufs->upper_mnt);
-	if (IS_ERR(ufs->upper_mnt)) {
-		pr_err("overlayfs: failed to clone upperpath\n");
-		goto out_put_workpath;
-	}
-
-	ufs->lower_mnt = clone_private_mount(&lowerpath);
-	err = PTR_ERR(ufs->lower_mnt);
-	if (IS_ERR(ufs->lower_mnt)) {
-		pr_err("overlayfs: failed to clone lowerpath\n");
-		goto out_put_upper_mnt;
-	}
-
-	ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
-	err = PTR_ERR(ufs->workdir);
-	if (IS_ERR(ufs->workdir)) {
-		pr_err("overlayfs: failed to create directory %s/%s\n",
-		       ufs->config.workdir, OVL_WORKDIR_NAME);
-		goto out_put_lower_mnt;
-	}
-
-	/*
-	 * Make lower_mnt R/O.  That way fchmod/fchown on lower file
-	 * will fail instead of modifying lower fs.
-	 */
-	ufs->lower_mnt->mnt_flags |= MNT_READONLY;
-
-	/* If the upper fs is r/o, we mark overlayfs r/o too */
-	if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)
+	/* If the upper fs is nonexistent, we mark overlayfs r/o too */
+	if (!ufs->upper_mnt)
 		sb->s_flags |= MS_RDONLY;
 
 	sb->s_d_op = &ovl_dentry_operations;
 
 	err = -ENOMEM;
-	root_inode = ovl_new_inode(sb, S_IFDIR, oe);
-	if (!root_inode)
-		goto out_put_workdir;
+	oe = ovl_alloc_entry(numlower);
+	if (!oe)
+		goto out_put_lower_mnt;
 
-	root_dentry = d_make_root(root_inode);
+	root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe));
 	if (!root_dentry)
-		goto out_put_workdir;
+		goto out_free_oe;
 
 	mntput(upperpath.mnt);
-	mntput(lowerpath.mnt);
+	for (i = 0; i < numlower; i++)
+		mntput(stack[i].mnt);
 	path_put(&workpath);
+	kfree(lowertmp);
 
 	oe->__upperdentry = upperpath.dentry;
-	oe->lowerdentry = lowerpath.dentry;
+	for (i = 0; i < numlower; i++) {
+		oe->lowerstack[i].dentry = stack[i].dentry;
+		oe->lowerstack[i].mnt = ufs->lower_mnt[i];
+	}
 
 	root_dentry->d_fsdata = oe;
 
@@ -782,20 +989,26 @@
 
 	return 0;
 
-out_put_workdir:
-	dput(ufs->workdir);
-out_put_lower_mnt:
-	mntput(ufs->lower_mnt);
-out_put_upper_mnt:
-	mntput(ufs->upper_mnt);
-out_put_workpath:
-	path_put(&workpath);
-out_put_lowerpath:
-	path_put(&lowerpath);
-out_put_upperpath:
-	path_put(&upperpath);
 out_free_oe:
 	kfree(oe);
+out_put_lower_mnt:
+	for (i = 0; i < ufs->numlower; i++)
+		mntput(ufs->lower_mnt[i]);
+	kfree(ufs->lower_mnt);
+out_put_workdir:
+	dput(ufs->workdir);
+out_put_upper_mnt:
+	mntput(ufs->upper_mnt);
+out_put_lowerpath:
+	for (i = 0; i < numlower; i++)
+		path_put(&stack[i]);
+	kfree(stack);
+out_free_lowertmp:
+	kfree(lowertmp);
+out_put_workpath:
+	path_put(&workpath);
+out_put_upperpath:
+	path_put(&upperpath);
 out_free_config:
 	kfree(ufs->config.lowerdir);
 	kfree(ufs->config.upperdir);
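As a reference for the lowerdir handling above: ovl_split_lowerdirs() turns the colon-separated mount option into consecutive NUL-terminated entries, and the setup loop then hops from one entry to the next with strchr(lower, '\0') + 1.  A minimal userspace sketch of that idiom follows; it is simplified (it ignores any escape handling the kernel helper performs) and the names are illustrative, not kernel API.

#include <stdio.h>
#include <string.h>

/* Stand-in for ovl_split_lowerdirs(): split "a:b:c" in place into
 * NUL-separated entries and return how many there are. */
static unsigned int split_lowerdirs(char *str)
{
	unsigned int ctr = 1;
	char *s;

	for (s = str; *s; s++) {
		if (*s == ':') {
			*s = '\0';
			ctr++;
		}
	}
	return ctr;
}

int main(void)
{
	char opt[] = "/lower1:/lower2:/lower3";
	unsigned int i, n = split_lowerdirs(opt);
	char *lower = opt;

	/* Walk the entries the same way the mount code does: jump past
	 * each terminating NUL to reach the next name. */
	for (i = 0; i < n; i++) {
		printf("lower[%u] = %s\n", i, lower);
		lower = strchr(lower, '\0') + 1;
	}
	return 0;
}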
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 0855f77..3a48bb7 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -564,13 +564,11 @@
 
 	*acl = posix_acl_clone(p, GFP_NOFS);
 	if (!*acl)
-		return -ENOMEM;
+		goto no_mem;
 
 	ret = posix_acl_create_masq(*acl, mode);
-	if (ret < 0) {
-		posix_acl_release(*acl);
-		return -ENOMEM;
-	}
+	if (ret < 0)
+		goto no_mem_clone;
 
 	if (ret == 0) {
 		posix_acl_release(*acl);
@@ -591,6 +589,12 @@
 	*default_acl = NULL;
 	*acl = NULL;
 	return 0;
+
+no_mem_clone:
+	posix_acl_release(*acl);
+no_mem:
+	posix_acl_release(p);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(posix_acl_create);
 
@@ -772,7 +776,7 @@
 
 	if (!IS_POSIXACL(dentry->d_inode))
 		return -EOPNOTSUPP;
-	if (S_ISLNK(dentry->d_inode->i_mode))
+	if (d_is_symlink(dentry))
 		return -EOPNOTSUPP;
 
 	acl = get_acl(dentry->d_inode, type);
@@ -832,7 +836,7 @@
 
 	if (!IS_POSIXACL(dentry->d_inode))
 		return -EOPNOTSUPP;
-	if (S_ISLNK(dentry->d_inode->i_mode))
+	if (d_is_symlink(dentry))
 		return -EOPNOTSUPP;
 
 	if (type == ACL_TYPE_ACCESS)
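The posix_acl_create() rework above follows the usual kernel error-unwinding style: one exit label per acquired object, ordered so that a later failure falls through and also releases what was acquired earlier (and, unlike the old early return, it now releases p on the clone-failure path as well).  A small self-contained sketch of the pattern, with purely illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* 'p' stands in for the ACL fetched first, 'clone' for the posix_acl_clone()
 * result; the labels release resources in reverse order of acquisition. */
static int make_clone_and_process(int fail_processing)
{
	char *p, *clone;

	p = malloc(32);
	if (!p)
		return -1;

	clone = malloc(32);
	if (!clone)
		goto no_mem;		/* release p, then fail */

	if (fail_processing)
		goto no_mem_clone;	/* release the clone, then fall through */

	free(clone);
	free(p);
	return 0;

no_mem_clone:
	free(clone);
no_mem:
	free(p);
	return -1;
}

int main(void)
{
	/* exercise both the success path and the unwinding path */
	printf("success: %d, unwound failure: %d\n",
	       make_clone_and_process(0), make_clone_and_process(1));
	return 0;
}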
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 3309f59..be65b20 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -19,7 +19,6 @@
 #include <linux/mount.h>
 #include <linux/init.h>
 #include <linux/idr.h>
-#include <linux/namei.h>
 #include <linux/bitops.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
@@ -223,17 +222,6 @@
 	spin_unlock_irqrestore(&proc_inum_lock, flags);
 }
 
-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-	nd_set_link(nd, __PDE_DATA(dentry->d_inode));
-	return NULL;
-}
-
-static const struct inode_operations proc_link_inode_operations = {
-	.readlink	= generic_readlink,
-	.follow_link	= proc_follow_link,
-};
-
 /*
  * Don't create negative dentries here, return -ENOENT by hand
  * instead.
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 13a50a3..7697b66 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/mount.h>
 #include <linux/magic.h>
+#include <linux/namei.h>
 
 #include <asm/uaccess.h>
 
@@ -393,6 +394,26 @@
 };
 #endif
 
+static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	struct proc_dir_entry *pde = PDE(dentry->d_inode);
+	if (unlikely(!use_pde(pde)))
+		return ERR_PTR(-EINVAL);
+	nd_set_link(nd, pde->data);
+	return pde;
+}
+
+static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+{
+	unuse_pde(p);
+}
+
+const struct inode_operations proc_link_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= proc_follow_link,
+	.put_link	= proc_put_link,
+};
+
 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
 	struct inode *inode = new_inode_pseudo(sb);
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 6fcdba5..c835b94 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -200,6 +200,7 @@
 	int closing;
 	struct completion *c;
 };
+extern const struct inode_operations proc_link_inode_operations;
 
 extern const struct inode_operations proc_pid_link_inode_operations;
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 956b75d..6dee68d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1325,6 +1325,9 @@
 
 static int pagemap_open(struct inode *inode, struct file *file)
 {
+	/* do not disclose physical addresses: attack vector */
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
 	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
 			"to stop being page-shift some time soon. See the "
 			"linux/Documentation/vm/pagemap.txt for details.\n");
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 04b0614..4e781e6 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -266,7 +266,7 @@
 		for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
 			struct dentry *dentry = buf.dentries[i];
 
-			if (!S_ISDIR(dentry->d_inode->i_mode))
+			if (!d_is_dir(dentry))
 				err = action(dentry, data);
 
 			dput(dentry);
@@ -322,7 +322,7 @@
 	struct inode *dir = dentry->d_parent->d_inode;
 
 	/* This is the xattr dir, handle specially. */
-	if (S_ISDIR(dentry->d_inode->i_mode))
+	if (d_is_dir(dentry))
 		return xattr_rmdir(dir, dentry);
 
 	return xattr_unlink(dir, dentry);
diff --git a/fs/super.c b/fs/super.c
index 65a53ef..2b7dc90 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -71,7 +71,7 @@
 	if (!(sc->gfp_mask & __GFP_FS))
 		return SHRINK_STOP;
 
-	if (!grab_super_passive(sb))
+	if (!trylock_super(sb))
 		return SHRINK_STOP;
 
 	if (sb->s_op->nr_cached_objects)
@@ -105,7 +105,7 @@
 		freed += sb->s_op->free_cached_objects(sb, sc);
 	}
 
-	drop_super(sb);
+	up_read(&sb->s_umount);
 	return freed;
 }
 
@@ -118,7 +118,7 @@
 	sb = container_of(shrink, struct super_block, s_shrink);
 
 	/*
-	 * Don't call grab_super_passive as it is a potential
+	 * Don't call trylock_super as it is a potential
 	 * scalability bottleneck. The counts could get updated
 	 * between super_cache_count and super_cache_scan anyway.
 	 * Call to super_cache_count with shrinker_rwsem held
@@ -348,35 +348,31 @@
 }
 
 /*
- *	grab_super_passive - acquire a passive reference
+ *	trylock_super - try to grab ->s_umount shared
  *	@sb: reference we are trying to grab
  *
- *	Tries to acquire a passive reference. This is used in places where we
+ *	Try to prevent fs shutdown.  This is used in places where we
  *	cannot take an active reference but we need to ensure that the
- *	superblock does not go away while we are working on it. It returns
- *	false if a reference was not gained, and returns true with the s_umount
- *	lock held in read mode if a reference is gained. On successful return,
- *	the caller must drop the s_umount lock and the passive reference when
- *	done.
+ *	filesystem is not shut down while we are working on it. It returns
+ *	false if we cannot acquire s_umount or if we lose the race and
+ *	filesystem already got into shutdown, and returns true with the s_umount
+ *	lock held in read mode in case of success. On successful return,
+ *	the caller must drop the s_umount lock when done.
+ *
+ *	Note that unlike get_super() et al. this one does *not* bump ->s_count.
+ *	The reason why it's safe is that we are OK with doing trylock instead
+ *	of down_read().  There are a couple of places that are OK with that, but
+ *	it's very much not a general-purpose interface.
  */
-bool grab_super_passive(struct super_block *sb)
+bool trylock_super(struct super_block *sb)
 {
-	spin_lock(&sb_lock);
-	if (hlist_unhashed(&sb->s_instances)) {
-		spin_unlock(&sb_lock);
-		return false;
-	}
-
-	sb->s_count++;
-	spin_unlock(&sb_lock);
-
 	if (down_read_trylock(&sb->s_umount)) {
-		if (sb->s_root && (sb->s_flags & MS_BORN))
+		if (!hlist_unhashed(&sb->s_instances) &&
+		    sb->s_root && (sb->s_flags & MS_BORN))
 			return true;
 		up_read(&sb->s_umount);
 	}
 
-	put_super(sb);
 	return false;
 }
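trylock_super() as consumed by super_cache_scan() above boils down to: take s_umount shared only if that can be done without blocking, check under the lock that the superblock is still alive (s_instances hashed, s_root set, MS_BORN), and let the caller drop the lock with up_read() when finished.  A userspace analogue of the try-shared-or-back-off pattern, using a pthread rwlock purely for illustration (build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t umount_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Analogue of trylock_super(): grab the lock shared only if that can be done
 * without sleeping; on success the caller must unlock when done. */
static bool trylock_shared(pthread_rwlock_t *lock)
{
	return pthread_rwlock_tryrdlock(lock) == 0;
}

int main(void)
{
	if (!trylock_shared(&umount_lock)) {
		/* Like super_cache_scan(): back off instead of blocking. */
		puts("busy, skipping scan");
		return 0;
	}

	puts("scanning caches under the shared lock");
	pthread_rwlock_unlock(&umount_lock);	/* up_read() equivalent */
	return 0;
}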
 
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index d617999..df68285 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -121,3 +121,4 @@
 xfs-$(CONFIG_PROC_FS)		+= xfs_stats.o
 xfs-$(CONFIG_SYSCTL)		+= xfs_sysctl.o
 xfs-$(CONFIG_COMPAT)		+= xfs_ioctl32.o
+xfs-$(CONFIG_NFSD_PNFS)		+= xfs_pnfs.o
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 5eb4a14..b97359b 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -30,6 +30,7 @@
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_log.h"
+#include "xfs_pnfs.h"
 
 /*
  * Note that we only accept fileids which are long enough rather than allow
@@ -245,4 +246,9 @@
 	.fh_to_parent		= xfs_fs_fh_to_parent,
 	.get_parent		= xfs_fs_get_parent,
 	.commit_metadata	= xfs_fs_nfs_commit_metadata,
+#ifdef CONFIG_NFSD_PNFS
+	.get_uuid		= xfs_fs_get_uuid,
+	.map_blocks		= xfs_fs_map_blocks,
+	.commit_blocks		= xfs_fs_commit_blocks,
+#endif
 };
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1cdba95..a2e1cb8 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -36,6 +36,7 @@
 #include "xfs_trace.h"
 #include "xfs_log.h"
 #include "xfs_icache.h"
+#include "xfs_pnfs.h"
 
 #include <linux/aio.h>
 #include <linux/dcache.h>
@@ -396,7 +397,8 @@
 xfs_zero_last_block(
 	struct xfs_inode	*ip,
 	xfs_fsize_t		offset,
-	xfs_fsize_t		isize)
+	xfs_fsize_t		isize,
+	bool			*did_zeroing)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
@@ -424,6 +426,7 @@
 	zero_len = mp->m_sb.sb_blocksize - zero_offset;
 	if (isize + zero_len > offset)
 		zero_len = offset - isize;
+	*did_zeroing = true;
 	return xfs_iozero(ip, isize, zero_len);
 }
 
@@ -442,7 +445,8 @@
 xfs_zero_eof(
 	struct xfs_inode	*ip,
 	xfs_off_t		offset,		/* starting I/O offset */
-	xfs_fsize_t		isize)		/* current inode size */
+	xfs_fsize_t		isize,		/* current inode size */
+	bool			*did_zeroing)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	xfs_fileoff_t		start_zero_fsb;
@@ -464,7 +468,7 @@
 	 * We only zero a part of that block so it is handled specially.
 	 */
 	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
-		error = xfs_zero_last_block(ip, offset, isize);
+		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
 		if (error)
 			return error;
 	}
@@ -524,6 +528,7 @@
 		if (error)
 			return error;
 
+		*did_zeroing = true;
 		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
 		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
 	}
@@ -554,6 +559,10 @@
 	if (error)
 		return error;
 
+	error = xfs_break_layouts(inode, iolock);
+	if (error)
+		return error;
+
 	/*
 	 * If the offset is beyond the size of the file, we need to zero any
 	 * blocks that fall between the existing EOF and the start of this
@@ -562,13 +571,15 @@
 	 * having to redo all checks before.
 	 */
 	if (*pos > i_size_read(inode)) {
+		bool	zero = false;
+
 		if (*iolock == XFS_IOLOCK_SHARED) {
 			xfs_rw_iunlock(ip, *iolock);
 			*iolock = XFS_IOLOCK_EXCL;
 			xfs_rw_ilock(ip, *iolock);
 			goto restart;
 		}
-		error = xfs_zero_eof(ip, *pos, i_size_read(inode));
+		error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
 		if (error)
 			return error;
 	}
@@ -822,6 +833,7 @@
 	struct xfs_inode	*ip = XFS_I(inode);
 	long			error;
 	enum xfs_prealloc_flags	flags = 0;
+	uint			iolock = XFS_IOLOCK_EXCL;
 	loff_t			new_size = 0;
 
 	if (!S_ISREG(inode->i_mode))
@@ -830,7 +842,11 @@
 		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
 		return -EOPNOTSUPP;
 
-	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+	xfs_ilock(ip, iolock);
+	error = xfs_break_layouts(inode, &iolock);
+	if (error)
+		goto out_unlock;
+
 	if (mode & FALLOC_FL_PUNCH_HOLE) {
 		error = xfs_free_file_space(ip, offset, len);
 		if (error)
@@ -894,7 +910,7 @@
 	}
 
 out_unlock:
-	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	xfs_iunlock(ip, iolock);
 	return error;
 }
 
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index fba6532..74efe5b 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -602,6 +602,12 @@
 	if (!mutex_trylock(&mp->m_growlock))
 		return -EWOULDBLOCK;
 	error = xfs_growfs_data_private(mp, in);
+	/*
+	 * Increment the generation unconditionally; the error could be from
+	 * updating the secondary superblocks, in which case the new size
+	 * is live already.
+	 */
+	mp->m_generation++;
 	mutex_unlock(&mp->m_growlock);
 	return error;
 }
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index daafa1f..6163767 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2867,6 +2867,10 @@
 	 * Handle RENAME_EXCHANGE flags
 	 */
 	if (flags & RENAME_EXCHANGE) {
+		if (target_ip == NULL) {
+			error = -EINVAL;
+			goto error_return;
+		}
 		error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
 					 target_dp, target_name, target_ip,
 					 &free_list, &first_block, spaceres);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 86cd6b3..a1cd55f 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -384,10 +384,11 @@
 	XFS_PREALLOC_INVISIBLE	= (1 << 4),
 };
 
-int		xfs_update_prealloc_flags(struct xfs_inode *,
-			enum xfs_prealloc_flags);
-int		xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
-int		xfs_iozero(struct xfs_inode *, loff_t, size_t);
+int	xfs_update_prealloc_flags(struct xfs_inode *ip,
+				  enum xfs_prealloc_flags flags);
+int	xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
+		     xfs_fsize_t isize, bool *did_zeroing);
+int	xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
 
 
 #define IHOLD(ip) \
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index f7afb86..ac4feae 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -39,6 +39,7 @@
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
 #include "xfs_trans.h"
+#include "xfs_pnfs.h"
 
 #include <linux/capability.h>
 #include <linux/dcache.h>
@@ -286,7 +287,7 @@
 		return PTR_ERR(dentry);
 
 	/* Restrict this handle operation to symlinks only. */
-	if (!S_ISLNK(dentry->d_inode->i_mode)) {
+	if (!d_is_symlink(dentry)) {
 		error = -EINVAL;
 		goto out_dput;
 	}
@@ -608,6 +609,7 @@
 {
 	struct iattr		iattr;
 	enum xfs_prealloc_flags	flags = 0;
+	uint			iolock = XFS_IOLOCK_EXCL;
 	int			error;
 
 	/*
@@ -636,7 +638,10 @@
 	if (error)
 		return error;
 
-	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+	xfs_ilock(ip, iolock);
+	error = xfs_break_layouts(inode, &iolock);
+	if (error)
+		goto out_unlock;
 
 	switch (bf->l_whence) {
 	case 0: /*SEEK_SET*/
@@ -725,7 +730,7 @@
 	error = xfs_update_prealloc_flags(ip, flags);
 
 out_unlock:
-	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	xfs_iunlock(ip, iolock);
 	mnt_drop_write_file(filp);
 	return error;
 }
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ce80eeb..e53a903 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -37,6 +37,7 @@
 #include "xfs_da_btree.h"
 #include "xfs_dir2.h"
 #include "xfs_trans_space.h"
+#include "xfs_pnfs.h"
 
 #include <linux/capability.h>
 #include <linux/xattr.h>
@@ -505,7 +506,7 @@
 	inode->i_mode |= mode & ~S_IFMT;
 }
 
-static void
+void
 xfs_setattr_time(
 	struct xfs_inode	*ip,
 	struct iattr		*iattr)
@@ -750,6 +751,7 @@
 	int			error;
 	uint			lock_flags = 0;
 	uint			commit_flags = 0;
+	bool			did_zeroing = false;
 
 	trace_xfs_setattr(ip);
 
@@ -793,20 +795,16 @@
 		return error;
 
 	/*
-	 * Now we can make the changes.  Before we join the inode to the
-	 * transaction, take care of the part of the truncation that must be
-	 * done without the inode lock.  This needs to be done before joining
-	 * the inode to the transaction, because the inode cannot be unlocked
-	 * once it is a part of the transaction.
+	 * File data changes must be complete before we start the transaction to
+	 * modify the inode.  This needs to be done before joining the inode to
+	 * the transaction because the inode cannot be unlocked once it is a
+	 * part of the transaction.
+	 *
+	 * Start with zeroing any data block beyond EOF that we may expose on
+	 * file extension.
 	 */
 	if (newsize > oldsize) {
-		/*
-		 * Do the first part of growing a file: zero any data in the
-		 * last block that is beyond the old EOF.  We need to do this
-		 * before the inode is joined to the transaction to modify
-		 * i_size.
-		 */
-		error = xfs_zero_eof(ip, newsize, oldsize);
+		error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
 		if (error)
 			return error;
 	}
@@ -816,23 +814,18 @@
 	 * any previous writes that are beyond the on disk EOF and the new
 	 * EOF that have not been written out need to be written here.  If we
 	 * do not write the data out, we expose ourselves to the null files
-	 * problem.
-	 *
-	 * Only flush from the on disk size to the smaller of the in memory
-	 * file size or the new size as that's the range we really care about
-	 * here and prevents waiting for other data not within the range we
-	 * care about here.
+	 * problem. Note that this includes any block zeroing we did above;
+	 * otherwise those blocks may not be zeroed after a crash.
 	 */
-	if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) {
+	if (newsize > ip->i_d.di_size &&
+	    (oldsize != ip->i_d.di_size || did_zeroing)) {
 		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
 						      ip->i_d.di_size, newsize);
 		if (error)
 			return error;
 	}
 
-	/*
-	 * Wait for all direct I/O to complete.
-	 */
+	/* Now wait for all direct I/O to complete. */
 	inode_dio_wait(inode);
 
 	/*
@@ -979,9 +972,13 @@
 	int			error;
 
 	if (iattr->ia_valid & ATTR_SIZE) {
-		xfs_ilock(ip, XFS_IOLOCK_EXCL);
-		error = xfs_setattr_size(ip, iattr);
-		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+		uint		iolock = XFS_IOLOCK_EXCL;
+
+		xfs_ilock(ip, iolock);
+		error = xfs_break_layouts(dentry->d_inode, &iolock);
+		if (!error)
+			error = xfs_setattr_size(ip, iattr);
+		xfs_iunlock(ip, iolock);
 	} else {
 		error = xfs_setattr_nonsize(ip, iattr, 0);
 	}
diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h
index 1c34e43..ea7a98e 100644
--- a/fs/xfs/xfs_iops.h
+++ b/fs/xfs/xfs_iops.h
@@ -32,6 +32,7 @@
  */
 #define XFS_ATTR_NOACL		0x01	/* Don't call posix_acl_chmod */
 
+extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr);
 extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
 			       int flags);
 extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a5b2ff8..0d8abd6 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -174,6 +174,17 @@
 	struct workqueue_struct	*m_reclaim_workqueue;
 	struct workqueue_struct	*m_log_workqueue;
 	struct workqueue_struct *m_eofblocks_workqueue;
+
+	/*
+	 * Generation of the filesystem layout.  This is incremented by each
+	 * growfs, and used by the pNFS server to ensure the client updates
+	 * its view of the block device once it gets a layout that might
+	 * reference the newly added blocks.  Does not need to be persistent
+	 * as long as we only allow file system size increments, but if we
+	 * ever support shrinks it would have to be persisted in addition
+	 * to various other kinds of pain inflicted on the pNFS server.
+	 */
+	__uint32_t		m_generation;
 } xfs_mount_t;
 
 /*
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
new file mode 100644
index 0000000..365dd57
--- /dev/null
+++ b/fs/xfs/xfs_pnfs.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#include "xfs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_error.h"
+#include "xfs_iomap.h"
+#include "xfs_shared.h"
+#include "xfs_bit.h"
+#include "xfs_pnfs.h"
+
+/*
+ * Ensure that we do not have any outstanding pNFS layouts that can be used by
+ * clients to directly read from or write to this inode.  This must be called
+ * before every operation that can remove blocks from the extent map.
+ * Additionally we call it during the write operation, where we aren't concerned
+ * about exposing unallocated blocks but just want to provide basic
+ * synchronization between a local writer and pNFS clients.  mmap writes would
+ * also benefit from this sort of synchronization, but due to the tricky locking
+ * rules in the page fault path we don't bother.
+ */
+int
+xfs_break_layouts(
+	struct inode		*inode,
+	uint			*iolock)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	int			error;
+
+	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
+
+	while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
+		xfs_iunlock(ip, *iolock);
+		error = break_layout(inode, true);
+		*iolock = XFS_IOLOCK_EXCL;
+		xfs_ilock(ip, *iolock);
+	}
+
+	return error;
+}
+
+/*
+ * Get a unique ID including its location so that the client can identify
+ * the exported device.
+ */
+int
+xfs_fs_get_uuid(
+	struct super_block	*sb,
+	u8			*buf,
+	u32			*len,
+	u64			*offset)
+{
+	struct xfs_mount	*mp = XFS_M(sb);
+
+	printk_once(KERN_NOTICE
+"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
+		mp->m_fsname);
+
+	if (*len < sizeof(uuid_t))
+		return -EINVAL;
+
+	memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
+	*len = sizeof(uuid_t);
+	*offset = offsetof(struct xfs_dsb, sb_uuid);
+	return 0;
+}
+
+static void
+xfs_bmbt_to_iomap(
+	struct xfs_inode	*ip,
+	struct iomap		*iomap,
+	struct xfs_bmbt_irec	*imap)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+
+	if (imap->br_startblock == HOLESTARTBLOCK) {
+		iomap->blkno = IOMAP_NULL_BLOCK;
+		iomap->type = IOMAP_HOLE;
+	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
+		iomap->blkno = IOMAP_NULL_BLOCK;
+		iomap->type = IOMAP_DELALLOC;
+	} else {
+		iomap->blkno =
+			XFS_FSB_TO_DADDR(ip->i_mount, imap->br_startblock);
+		if (imap->br_state == XFS_EXT_UNWRITTEN)
+			iomap->type = IOMAP_UNWRITTEN;
+		else
+			iomap->type = IOMAP_MAPPED;
+	}
+	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
+	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
+}
+
+/*
+ * Get a layout for the pNFS client.
+ */
+int
+xfs_fs_map_blocks(
+	struct inode		*inode,
+	loff_t			offset,
+	u64			length,
+	struct iomap		*iomap,
+	bool			write,
+	u32			*device_generation)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_bmbt_irec	imap;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	loff_t			limit;
+	int			bmapi_flags = XFS_BMAPI_ENTIRE;
+	int			nimaps = 1;
+	uint			lock_flags;
+	int			error = 0;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -EIO;
+
+	/*
+	 * We can't export inodes residing on the realtime device.  The realtime
+	 * device doesn't have a UUID to identify it, so the client has no way
+	 * to find it.
+	 */
+	if (XFS_IS_REALTIME_INODE(ip))
+		return -ENXIO;
+
+	/*
+	 * Lock out any other I/O before we flush and invalidate the pagecache,
+	 * and then hand out a layout to the remote system.  This is very
+	 * similar to direct I/O, except that the synchronization is much more
+	 * complicated.  See the comment near xfs_break_layouts for a detailed
+	 * explanation.
+	 */
+	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+	error = -EINVAL;
+	limit = mp->m_super->s_maxbytes;
+	if (!write)
+		limit = max(limit, round_up(i_size_read(inode),
+				     inode->i_sb->s_blocksize));
+	if (offset > limit)
+		goto out_unlock;
+	if (offset > limit - length)
+		length = limit - offset;
+
+	error = filemap_write_and_wait(inode->i_mapping);
+	if (error)
+		goto out_unlock;
+	error = invalidate_inode_pages2(inode->i_mapping);
+	if (WARN_ON_ONCE(error))
+		return error;
+
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
+	lock_flags = xfs_ilock_data_map_shared(ip);
+	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
+				&imap, &nimaps, bmapi_flags);
+	xfs_iunlock(ip, lock_flags);
+
+	if (error)
+		goto out_unlock;
+
+	if (write) {
+		enum xfs_prealloc_flags	flags = 0;
+
+		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+
+		if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
+			error = xfs_iomap_write_direct(ip, offset, length,
+						       &imap, nimaps);
+			if (error)
+				goto out_unlock;
+
+			/*
+			 * Ensure the next transaction is committed
+			 * synchronously so that the blocks allocated and
+			 * handed out to the client are guaranteed to be
+			 * present even after a server crash.
+			 */
+			flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC;
+		}
+
+		error = xfs_update_prealloc_flags(ip, flags);
+		if (error)
+			goto out_unlock;
+	}
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+
+	xfs_bmbt_to_iomap(ip, iomap, &imap);
+	*device_generation = mp->m_generation;
+	return error;
+out_unlock:
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	return error;
+}
+
+/*
+ * Ensure the size update falls into a valid allocated block.
+ */
+static int
+xfs_pnfs_validate_isize(
+	struct xfs_inode	*ip,
+	xfs_off_t		isize)
+{
+	struct xfs_bmbt_irec	imap;
+	int			nimaps = 1;
+	int			error = 0;
+
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+	error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
+				&imap, &nimaps, 0);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	if (error)
+		return error;
+
+	if (imap.br_startblock == HOLESTARTBLOCK ||
+	    imap.br_startblock == DELAYSTARTBLOCK ||
+	    imap.br_state == XFS_EXT_UNWRITTEN)
+		return -EIO;
+	return 0;
+}
+
+/*
+ * Make sure the blocks described by maps are stable on disk.  This includes
+ * converting any unwritten extents, flushing the disk cache and updating the
+ * time stamps.
+ *
+ * Note that we rely on the caller to always send us a timestamp update so that
+ * we always commit a transaction here.  If that stops being true we will have
+ * to manually flush the cache here similar to what the fsync code path does
+ * for datasyncs on files that have no dirty metadata.
+ */
+int
+xfs_fs_commit_blocks(
+	struct inode		*inode,
+	struct iomap		*maps,
+	int			nr_maps,
+	struct iattr		*iattr)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	bool			update_isize = false;
+	int			error, i;
+	loff_t			size;
+
+	ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));
+
+	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+	size = i_size_read(inode);
+	if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
+		update_isize = true;
+		size = iattr->ia_size;
+	}
+
+	for (i = 0; i < nr_maps; i++) {
+		u64 start, length, end;
+
+		start = maps[i].offset;
+		if (start > size)
+			continue;
+
+		end = start + maps[i].length;
+		if (end > size)
+			end = size;
+
+		length = end - start;
+		if (!length)
+			continue;
+
+		/*
+		 * Make sure reads through the pagecache see the new data.
+		 */
+		error = invalidate_inode_pages2_range(inode->i_mapping,
+					start >> PAGE_CACHE_SHIFT,
+					(end - 1) >> PAGE_CACHE_SHIFT);
+		WARN_ON_ONCE(error);
+
+		error = xfs_iomap_write_unwritten(ip, start, length);
+		if (error)
+			goto out_drop_iolock;
+	}
+
+	if (update_isize) {
+		error = xfs_pnfs_validate_isize(ip, size);
+		if (error)
+			goto out_drop_iolock;
+	}
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		goto out_drop_iolock;
+	}
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+	xfs_setattr_time(ip, iattr);
+	if (update_isize) {
+		i_size_write(inode, iattr->ia_size);
+		ip->i_d.di_size = iattr->ia_size;
+	}
+
+	xfs_trans_set_sync(tp);
+	error = xfs_trans_commit(tp, 0);
+
+out_drop_iolock:
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+	return error;
+}
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
new file mode 100644
index 0000000..b7fbfce
--- /dev/null
+++ b/fs/xfs/xfs_pnfs.h
@@ -0,0 +1,18 @@
+#ifndef _XFS_PNFS_H
+#define _XFS_PNFS_H 1
+
+#ifdef CONFIG_NFSD_PNFS
+int xfs_fs_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
+int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
+		struct iomap *iomap, bool write, u32 *device_generation);
+int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps,
+		struct iattr *iattr);
+
+int xfs_break_layouts(struct inode *inode, uint *iolock);
+#else
+static inline int xfs_break_layouts(struct inode *inode, uint *iolock)
+{
+	return 0;
+}
+#endif /* CONFIG_NFSD_PNFS */
+#endif /* _XFS_PNFS_H */
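The callers added in xfs_file.c, xfs_ioctl.c and xfs_iops.c all wrap xfs_break_layouts() the same way.  A condensed sketch of that calling pattern (kernel context assumed, the function name is hypothetical):

/* Any path that may remove blocks takes the iolock, then asks the pNFS code
 * to recall outstanding layouts before touching the extent map.  Because
 * xfs_break_layouts() may drop the lock, sleep, and re-take it exclusively,
 * the caller tracks the current mode in 'iolock' and unlocks with it. */
static int xfs_example_free_space(struct xfs_inode *ip, struct inode *inode)
{
	uint	iolock = XFS_IOLOCK_EXCL;
	int	error;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	/* ... punch holes / truncate / free space here ... */

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}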
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 53cc2aa..fbbb9e6 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -836,6 +836,11 @@
 		 */
 		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
 			    "xfs_quotacheck");
+		/*
+		 * Reset type in case we are reusing group quota file for
+		 * project quotas or vice versa
+		 */
+		ddq->d_flags = type;
 		ddq->d_bcount = 0;
 		ddq->d_icount = 0;
 		ddq->d_rtbcount = 0;
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index a24addf..0de6290 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -68,8 +68,8 @@
 	unsigned scanned_preceeds_hole : 1;
 	unsigned allocated : 1;
 	unsigned long color;
-	unsigned long start;
-	unsigned long size;
+	u64 start;
+	u64 size;
 	struct drm_mm *mm;
 };
 
@@ -82,16 +82,16 @@
 	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
 	unsigned long scan_color;
-	unsigned long scan_size;
-	unsigned long scan_hit_start;
-	unsigned long scan_hit_end;
+	u64 scan_size;
+	u64 scan_hit_start;
+	u64 scan_hit_end;
 	unsigned scanned_blocks;
-	unsigned long scan_start;
-	unsigned long scan_end;
+	u64 scan_start;
+	u64 scan_end;
 	struct drm_mm_node *prev_scanned_node;
 
 	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
-			     unsigned long *start, unsigned long *end);
+			     u64 *start, u64 *end);
 };
 
 /**
@@ -124,7 +124,7 @@
 	return mm->hole_stack.next;
 }
 
-static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
 	return hole_node->start + hole_node->size;
 }
@@ -140,13 +140,13 @@
  * Returns:
  * Start of the subsequent hole.
  */
-static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
 	BUG_ON(!hole_node->hole_follows);
 	return __drm_mm_hole_node_start(hole_node);
 }
 
-static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 {
 	return list_entry(hole_node->node_list.next,
 			  struct drm_mm_node, node_list)->start;
@@ -163,7 +163,7 @@
  * Returns:
  * End of the subsequent hole.
  */
-static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 {
 	return __drm_mm_hole_node_end(hole_node);
 }
@@ -222,7 +222,7 @@
 
 int drm_mm_insert_node_generic(struct drm_mm *mm,
 			       struct drm_mm_node *node,
-			       unsigned long size,
+			       u64 size,
 			       unsigned alignment,
 			       unsigned long color,
 			       enum drm_mm_search_flags sflags,
@@ -245,7 +245,7 @@
  */
 static inline int drm_mm_insert_node(struct drm_mm *mm,
 				     struct drm_mm_node *node,
-				     unsigned long size,
+				     u64 size,
 				     unsigned alignment,
 				     enum drm_mm_search_flags flags)
 {
@@ -255,11 +255,11 @@
 
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
 					struct drm_mm_node *node,
-					unsigned long size,
+					u64 size,
 					unsigned alignment,
 					unsigned long color,
-					unsigned long start,
-					unsigned long end,
+					u64 start,
+					u64 end,
 					enum drm_mm_search_flags sflags,
 					enum drm_mm_allocator_flags aflags);
 /**
@@ -282,10 +282,10 @@
  */
 static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 					      struct drm_mm_node *node,
-					      unsigned long size,
+					      u64 size,
 					      unsigned alignment,
-					      unsigned long start,
-					      unsigned long end,
+					      u64 start,
+					      u64 end,
 					      enum drm_mm_search_flags flags)
 {
 	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
@@ -296,21 +296,21 @@
 void drm_mm_remove_node(struct drm_mm_node *node);
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
 void drm_mm_init(struct drm_mm *mm,
-		 unsigned long start,
-		 unsigned long size);
+		 u64 start,
+		 u64 size);
 void drm_mm_takedown(struct drm_mm *mm);
 bool drm_mm_clean(struct drm_mm *mm);
 
 void drm_mm_init_scan(struct drm_mm *mm,
-		      unsigned long size,
+		      u64 size,
 		      unsigned alignment,
 		      unsigned long color);
 void drm_mm_init_scan_with_range(struct drm_mm *mm,
-				 unsigned long size,
+				 u64 size,
 				 unsigned alignment,
 				 unsigned long color,
-				 unsigned long start,
-				 unsigned long end);
+				 u64 start,
+				 u64 end);
 bool drm_mm_scan_add_block(struct drm_mm_node *node);
 bool drm_mm_scan_remove_block(struct drm_mm_node *node);
 
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 180ad0e..d016dc5 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -214,9 +214,9 @@
 	INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
 
 #define _INTEL_BDW_M_IDS(gt, info) \
-	_INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
+	_INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
 	_INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
-	_INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
+	_INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
 	_INTEL_BDW_M(gt, 0x160E, info) /* ULX */
 
 #define _INTEL_BDW_D_IDS(gt, info) \
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0ccf7f2..c768ddf 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -249,7 +249,7 @@
 	 * either of these locks held.
 	 */
 
-	unsigned long offset;
+	uint64_t offset; /* GPU address space is independent of CPU word size */
 	uint32_t cur_placement;
 
 	struct sg_table *sg;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 142d752..813042c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -277,7 +277,7 @@
 	bool has_type;
 	bool use_type;
 	uint32_t flags;
-	unsigned long gpu_offset;
+	uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
 	uint64_t size;
 	uint32_t available_caching;
 	uint32_t default_caching;
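The switch from unsigned long to u64 above matters on 32-bit kernels, where unsigned long is 32 bits and a GPU offset past 4 GiB would silently truncate, which is exactly what the new ttm comments call out.  A quick userspace illustration of that truncation:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A GPU address just past 4 GiB, as drm_mm can hand out on hardware
	 * whose address space is wider than the CPU word size. */
	uint64_t gpu_offset = (1ULL << 32) + 0x1000;

	/* What a 32-bit 'unsigned long' field would have stored. */
	uint32_t truncated = (uint32_t)gpu_offset;

	printf("u64 offset       : 0x%" PRIx64 "\n", gpu_offset);
	printf("32-bit truncation: 0x%" PRIx32 "\n", truncated);
	return 0;
}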
diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h
new file mode 100644
index 0000000..04e8db2
--- /dev/null
+++ b/include/dt-bindings/clock/alphascale,asm9260.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ASM9260_H
+#define _DT_BINDINGS_CLK_ASM9260_H
+
+/* ahb gate */
+#define CLKID_AHB_ROM		0
+#define CLKID_AHB_RAM		1
+#define CLKID_AHB_GPIO		2
+#define CLKID_AHB_MAC		3
+#define CLKID_AHB_EMI		4
+#define CLKID_AHB_USB0		5
+#define CLKID_AHB_USB1		6
+#define CLKID_AHB_DMA0		7
+#define CLKID_AHB_DMA1		8
+#define CLKID_AHB_UART0		9
+#define CLKID_AHB_UART1		10
+#define CLKID_AHB_UART2		11
+#define CLKID_AHB_UART3		12
+#define CLKID_AHB_UART4		13
+#define CLKID_AHB_UART5		14
+#define CLKID_AHB_UART6		15
+#define CLKID_AHB_UART7		16
+#define CLKID_AHB_UART8		17
+#define CLKID_AHB_UART9		18
+#define CLKID_AHB_I2S0		19
+#define CLKID_AHB_I2C0		20
+#define CLKID_AHB_I2C1		21
+#define CLKID_AHB_SSP0		22
+#define CLKID_AHB_IOCONFIG	23
+#define CLKID_AHB_WDT		24
+#define CLKID_AHB_CAN0		25
+#define CLKID_AHB_CAN1		26
+#define CLKID_AHB_MPWM		27
+#define CLKID_AHB_SPI0		28
+#define CLKID_AHB_SPI1		29
+#define CLKID_AHB_QEI		30
+#define CLKID_AHB_QUADSPI0	31
+#define CLKID_AHB_CAMIF		32
+#define CLKID_AHB_LCDIF		33
+#define CLKID_AHB_TIMER0	34
+#define CLKID_AHB_TIMER1	35
+#define CLKID_AHB_TIMER2	36
+#define CLKID_AHB_TIMER3	37
+#define CLKID_AHB_IRQ		38
+#define CLKID_AHB_RTC		39
+#define CLKID_AHB_NAND		40
+#define CLKID_AHB_ADC0		41
+#define CLKID_AHB_LED		42
+#define CLKID_AHB_DAC0		43
+#define CLKID_AHB_LCD		44
+#define CLKID_AHB_I2S1		45
+#define CLKID_AHB_MAC1		46
+
+/* divider */
+#define CLKID_SYS_CPU		47
+#define CLKID_SYS_AHB		48
+#define CLKID_SYS_I2S0M		49
+#define CLKID_SYS_I2S0S		50
+#define CLKID_SYS_I2S1M		51
+#define CLKID_SYS_I2S1S		52
+#define CLKID_SYS_UART0		53
+#define CLKID_SYS_UART1		54
+#define CLKID_SYS_UART2		55
+#define CLKID_SYS_UART3		56
+#define CLKID_SYS_UART4		56
+#define CLKID_SYS_UART5		57
+#define CLKID_SYS_UART6		58
+#define CLKID_SYS_UART7		59
+#define CLKID_SYS_UART8		60
+#define CLKID_SYS_UART9		61
+#define CLKID_SYS_SPI0		62
+#define CLKID_SYS_SPI1		63
+#define CLKID_SYS_QUADSPI	64
+#define CLKID_SYS_SSP0		65
+#define CLKID_SYS_NAND		66
+#define CLKID_SYS_TRACE		67
+#define CLKID_SYS_CAMM		68
+#define CLKID_SYS_WDT		69
+#define CLKID_SYS_CLKOUT	70
+#define CLKID_SYS_MAC		71
+#define CLKID_SYS_LCD		72
+#define CLKID_SYS_ADCANA	73
+
+#define MAX_CLKS		74
+#endif
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index 34fe28c..c4b1676 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -262,8 +262,13 @@
 #define CLK_DIV_MCUISP1		453 /* Exynos4x12 only */
 #define CLK_DIV_ACLK200		454 /* Exynos4x12 only */
 #define CLK_DIV_ACLK400_MCUISP	455 /* Exynos4x12 only */
+#define CLK_DIV_ACP		456
+#define CLK_DIV_DMC		457
+#define CLK_DIV_C2C		458 /* Exynos4x12 only */
+#define CLK_DIV_GDL		459
+#define CLK_DIV_GDR		460
 
 /* must be greater than maximal clock id */
-#define CLK_NR_CLKS		456
+#define CLK_NR_CLKS		461
 
 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h
index 8e4681b..e33c75a 100644
--- a/include/dt-bindings/clock/exynos7-clk.h
+++ b/include/dt-bindings/clock/exynos7-clk.h
@@ -17,7 +17,11 @@
 #define DOUT_SCLK_CC_PLL		4
 #define DOUT_SCLK_MFC_PLL		5
 #define DOUT_ACLK_CCORE_133		6
-#define TOPC_NR_CLK			7
+#define DOUT_ACLK_MSCL_532		7
+#define ACLK_MSCL_532			8
+#define DOUT_SCLK_AUD_PLL		9
+#define FOUT_AUD_PLL			10
+#define TOPC_NR_CLK			11
 
 /* TOP0 */
 #define DOUT_ACLK_PERIC1		1
@@ -26,7 +30,15 @@
 #define CLK_SCLK_UART1			4
 #define CLK_SCLK_UART2			5
 #define CLK_SCLK_UART3			6
-#define TOP0_NR_CLK			7
+#define CLK_SCLK_SPI0			7
+#define CLK_SCLK_SPI1			8
+#define CLK_SCLK_SPI2			9
+#define CLK_SCLK_SPI3			10
+#define CLK_SCLK_SPI4			11
+#define CLK_SCLK_SPDIF			12
+#define CLK_SCLK_PCM1			13
+#define CLK_SCLK_I2S1			14
+#define TOP0_NR_CLK			15
 
 /* TOP1 */
 #define DOUT_ACLK_FSYS1_200		1
@@ -70,7 +82,23 @@
 #define PCLK_HSI2C6			9
 #define PCLK_HSI2C7			10
 #define PCLK_HSI2C8			11
-#define PERIC1_NR_CLK			12
+#define PCLK_SPI0			12
+#define PCLK_SPI1			13
+#define PCLK_SPI2			14
+#define PCLK_SPI3			15
+#define PCLK_SPI4			16
+#define SCLK_SPI0			17
+#define SCLK_SPI1			18
+#define SCLK_SPI2			19
+#define SCLK_SPI3			20
+#define SCLK_SPI4			21
+#define PCLK_I2S1			22
+#define PCLK_PCM1			23
+#define PCLK_SPDIF			24
+#define SCLK_I2S1			25
+#define SCLK_PCM1			26
+#define SCLK_SPDIF			27
+#define PERIC1_NR_CLK			28
 
 /* PERIS */
 #define PCLK_CHIPID			1
@@ -82,11 +110,63 @@
 
 /* FSYS0 */
 #define ACLK_MMC2			1
-#define FSYS0_NR_CLK			2
+#define ACLK_AXIUS_USBDRD30X_FSYS0X	2
+#define ACLK_USBDRD300			3
+#define SCLK_USBDRD300_SUSPENDCLK	4
+#define SCLK_USBDRD300_REFCLK		5
+#define PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER		6
+#define PHYCLK_USBDRD300_UDRD30_PHYCLK_USER		7
+#define OSCCLK_PHY_CLKOUT_USB30_PHY		8
+#define ACLK_PDMA0			9
+#define ACLK_PDMA1			10
+#define FSYS0_NR_CLK			11
 
 /* FSYS1 */
 #define ACLK_MMC1			1
 #define ACLK_MMC0			2
 #define FSYS1_NR_CLK			3
 
+/* MSCL */
+#define USERMUX_ACLK_MSCL_532		1
+#define DOUT_PCLK_MSCL			2
+#define ACLK_MSCL_0			3
+#define ACLK_MSCL_1			4
+#define ACLK_JPEG			5
+#define ACLK_G2D			6
+#define ACLK_LH_ASYNC_SI_MSCL_0		7
+#define ACLK_LH_ASYNC_SI_MSCL_1		8
+#define ACLK_AXI2ACEL_BRIDGE		9
+#define ACLK_XIU_MSCLX_0		10
+#define ACLK_XIU_MSCLX_1		11
+#define ACLK_QE_MSCL_0			12
+#define ACLK_QE_MSCL_1			13
+#define ACLK_QE_JPEG			14
+#define ACLK_QE_G2D			15
+#define ACLK_PPMU_MSCL_0		16
+#define ACLK_PPMU_MSCL_1		17
+#define ACLK_MSCLNP_133			18
+#define ACLK_AHB2APB_MSCL0P		19
+#define ACLK_AHB2APB_MSCL1P		20
+
+#define PCLK_MSCL_0			21
+#define PCLK_MSCL_1			22
+#define PCLK_JPEG			23
+#define PCLK_G2D			24
+#define PCLK_QE_MSCL_0			25
+#define PCLK_QE_MSCL_1			26
+#define PCLK_QE_JPEG			27
+#define PCLK_QE_G2D			28
+#define PCLK_PPMU_MSCL_0		29
+#define PCLK_PPMU_MSCL_1		30
+#define PCLK_AXI2ACEL_BRIDGE		31
+#define PCLK_PMU_MSCL			32
+#define MSCL_NR_CLK			33
+
+/* AUD */
+#define SCLK_I2S			1
+#define SCLK_PCM			2
+#define PCLK_I2S			3
+#define PCLK_PCM			4
+#define ACLK_ADMA			5
+#define AUD_NR_CLK			6
 #endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index b857cad..04fb29a 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -238,7 +238,6 @@
 #define PLL0_VOTE				221
 #define PLL3					222
 #define PLL3_VOTE				223
-#define PLL4					224
 #define PLL4_VOTE				225
 #define PLL8					226
 #define PLL8_VOTE				227
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
new file mode 100644
index 0000000..4e944b8
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_IPQ806X_H
+#define _DT_BINDINGS_CLK_LCC_IPQ806X_H
+
+#define PLL4				0
+#define MI2S_OSR_SRC			1
+#define MI2S_OSR_CLK			2
+#define MI2S_DIV_CLK			3
+#define MI2S_BIT_DIV_CLK		4
+#define MI2S_BIT_CLK			5
+#define PCM_SRC				6
+#define PCM_CLK_OUT			7
+#define PCM_CLK				8
+#define SPDIF_SRC			9
+#define SPDIF_CLK			10
+#define AHBIX_CLK			11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-msm8960.h b/include/dt-bindings/clock/qcom,lcc-msm8960.h
new file mode 100644
index 0000000..4fb2aa6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-msm8960.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_MSM8960_H
+#define _DT_BINDINGS_CLK_LCC_MSM8960_H
+
+#define PLL4				0
+#define MI2S_OSR_SRC			1
+#define MI2S_OSR_CLK			2
+#define MI2S_DIV_CLK			3
+#define MI2S_BIT_DIV_CLK		4
+#define MI2S_BIT_CLK			5
+#define PCM_SRC				6
+#define PCM_CLK_OUT			7
+#define PCM_CLK				8
+#define SLIMBUS_SRC			9
+#define AUDIO_SLIMBUS_CLK		10
+#define SPS_SLIMBUS_CLK			11
+#define CODEC_I2S_MIC_OSR_SRC		12
+#define CODEC_I2S_MIC_OSR_CLK		13
+#define CODEC_I2S_MIC_DIV_CLK		14
+#define CODEC_I2S_MIC_BIT_DIV_CLK	15
+#define CODEC_I2S_MIC_BIT_CLK		16
+#define SPARE_I2S_MIC_OSR_SRC		17
+#define SPARE_I2S_MIC_OSR_CLK		18
+#define SPARE_I2S_MIC_DIV_CLK		19
+#define SPARE_I2S_MIC_BIT_DIV_CLK	20
+#define SPARE_I2S_MIC_BIT_CLK		21
+#define CODEC_I2S_SPKR_OSR_SRC		22
+#define CODEC_I2S_SPKR_OSR_CLK		23
+#define CODEC_I2S_SPKR_DIV_CLK		24
+#define CODEC_I2S_SPKR_BIT_DIV_CLK	25
+#define CODEC_I2S_SPKR_BIT_CLK		26
+#define SPARE_I2S_SPKR_OSR_SRC		27
+#define SPARE_I2S_SPKR_OSR_CLK		28
+#define SPARE_I2S_SPKR_DIV_CLK		29
+#define SPARE_I2S_SPKR_BIT_DIV_CLK	30
+#define SPARE_I2S_SPKR_BIT_CLK		31
+
+#endif
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
new file mode 100644
index 0000000..ae2eb17
--- /dev/null
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -0,0 +1,345 @@
+/*
+ * This header provides constants for binding nvidia,tegra124-car or
+ * nvidia,tegra132-car.
+ *
+ * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
+ * registers. These IDs often match those in the CAR's RST_DEVICES registers,
+ * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
+ * this case, those clocks are assigned IDs above 185 in order to highlight
+ * this issue. Implementations that interpret these clock IDs as bit values
+ * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
+ * explicitly handle these special cases.
+ *
+ * The balance of the clocks controlled by the CAR are assigned IDs of 185 and
+ * above.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+
+/* 0 */
+/* 1 */
+/* 2 */
+#define TEGRA124_CLK_ISPB 3
+#define TEGRA124_CLK_RTC 4
+#define TEGRA124_CLK_TIMER 5
+#define TEGRA124_CLK_UARTA 6
+/* 7 (register bit affects uartb and vfir) */
+/* 8 */
+#define TEGRA124_CLK_SDMMC2 9
+/* 10 (register bit affects spdif_in and spdif_out) */
+#define TEGRA124_CLK_I2S1 11
+#define TEGRA124_CLK_I2C1 12
+/* 13 */
+#define TEGRA124_CLK_SDMMC1 14
+#define TEGRA124_CLK_SDMMC4 15
+/* 16 */
+#define TEGRA124_CLK_PWM 17
+#define TEGRA124_CLK_I2S2 18
+/* 20 (register bit affects vi and vi_sensor) */
+/* 21 */
+#define TEGRA124_CLK_USBD 22
+#define TEGRA124_CLK_ISP 23
+/* 26 */
+/* 25 */
+#define TEGRA124_CLK_DISP2 26
+#define TEGRA124_CLK_DISP1 27
+#define TEGRA124_CLK_HOST1X 28
+#define TEGRA124_CLK_VCP 29
+#define TEGRA124_CLK_I2S0 30
+/* 31 */
+
+#define TEGRA124_CLK_MC 32
+/* 33 */
+#define TEGRA124_CLK_APBDMA 34
+/* 35 */
+#define TEGRA124_CLK_KBC 36
+/* 37 */
+/* 38 */
+/* 39 (register bit affects fuse and fuse_burn) */
+#define TEGRA124_CLK_KFUSE 40
+#define TEGRA124_CLK_SBC1 41
+#define TEGRA124_CLK_NOR 42
+/* 43 */
+#define TEGRA124_CLK_SBC2 44
+/* 45 */
+#define TEGRA124_CLK_SBC3 46
+#define TEGRA124_CLK_I2C5 47
+#define TEGRA124_CLK_DSIA 48
+/* 49 */
+#define TEGRA124_CLK_MIPI 50
+#define TEGRA124_CLK_HDMI 51
+#define TEGRA124_CLK_CSI 52
+/* 53 */
+#define TEGRA124_CLK_I2C2 54
+#define TEGRA124_CLK_UARTC 55
+#define TEGRA124_CLK_MIPI_CAL 56
+#define TEGRA124_CLK_EMC 57
+#define TEGRA124_CLK_USB2 58
+#define TEGRA124_CLK_USB3 59
+/* 60 */
+#define TEGRA124_CLK_VDE 61
+#define TEGRA124_CLK_BSEA 62
+#define TEGRA124_CLK_BSEV 63
+
+/* 64 */
+#define TEGRA124_CLK_UARTD 65
+/* 66 */
+#define TEGRA124_CLK_I2C3 67
+#define TEGRA124_CLK_SBC4 68
+#define TEGRA124_CLK_SDMMC3 69
+#define TEGRA124_CLK_PCIE 70
+#define TEGRA124_CLK_OWR 71
+#define TEGRA124_CLK_AFI 72
+#define TEGRA124_CLK_CSITE 73
+/* 74 */
+/* 75 */
+#define TEGRA124_CLK_LA 76
+#define TEGRA124_CLK_TRACE 77
+#define TEGRA124_CLK_SOC_THERM 78
+#define TEGRA124_CLK_DTV 79
+/* 80 */
+#define TEGRA124_CLK_I2CSLOW 81
+#define TEGRA124_CLK_DSIB 82
+#define TEGRA124_CLK_TSEC 83
+/* 84 */
+/* 85 */
+/* 86 */
+/* 87 */
+/* 88 */
+#define TEGRA124_CLK_XUSB_HOST 89
+/* 90 */
+#define TEGRA124_CLK_MSENC 91
+#define TEGRA124_CLK_CSUS 92
+/* 93 */
+/* 94 */
+/* 95 (bit affects xusb_dev and xusb_dev_src) */
+
+/* 96 */
+/* 97 */
+/* 98 */
+#define TEGRA124_CLK_MSELECT 99
+#define TEGRA124_CLK_TSENSOR 100
+#define TEGRA124_CLK_I2S3 101
+#define TEGRA124_CLK_I2S4 102
+#define TEGRA124_CLK_I2C4 103
+#define TEGRA124_CLK_SBC5 104
+#define TEGRA124_CLK_SBC6 105
+#define TEGRA124_CLK_D_AUDIO 106
+#define TEGRA124_CLK_APBIF 107
+#define TEGRA124_CLK_DAM0 108
+#define TEGRA124_CLK_DAM1 109
+#define TEGRA124_CLK_DAM2 110
+#define TEGRA124_CLK_HDA2CODEC_2X 111
+/* 112 */
+#define TEGRA124_CLK_AUDIO0_2X 113
+#define TEGRA124_CLK_AUDIO1_2X 114
+#define TEGRA124_CLK_AUDIO2_2X 115
+#define TEGRA124_CLK_AUDIO3_2X 116
+#define TEGRA124_CLK_AUDIO4_2X 117
+#define TEGRA124_CLK_SPDIF_2X 118
+#define TEGRA124_CLK_ACTMON 119
+#define TEGRA124_CLK_EXTERN1 120
+#define TEGRA124_CLK_EXTERN2 121
+#define TEGRA124_CLK_EXTERN3 122
+#define TEGRA124_CLK_SATA_OOB 123
+#define TEGRA124_CLK_SATA 124
+#define TEGRA124_CLK_HDA 125
+/* 126 */
+#define TEGRA124_CLK_SE 127
+
+#define TEGRA124_CLK_HDA2HDMI 128
+#define TEGRA124_CLK_SATA_COLD 129
+/* 130 */
+/* 131 */
+/* 132 */
+/* 133 */
+/* 134 */
+/* 135 */
+/* 136 */
+/* 137 */
+/* 138 */
+/* 139 */
+/* 140 */
+/* 141 */
+/* 142 */
+/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
+/*      xusb_host_src and xusb_ss_src) */
+#define TEGRA124_CLK_CILAB 144
+#define TEGRA124_CLK_CILCD 145
+#define TEGRA124_CLK_CILE 146
+#define TEGRA124_CLK_DSIALP 147
+#define TEGRA124_CLK_DSIBLP 148
+#define TEGRA124_CLK_ENTROPY 149
+#define TEGRA124_CLK_DDS 150
+/* 151 */
+#define TEGRA124_CLK_DP2 152
+#define TEGRA124_CLK_AMX 153
+#define TEGRA124_CLK_ADX 154
+/* 155 (bit affects dfll_ref and dfll_soc) */
+#define TEGRA124_CLK_XUSB_SS 156
+/* 157 */
+/* 158 */
+/* 159 */
+
+/* 160 */
+/* 161 */
+/* 162 */
+/* 163 */
+/* 164 */
+/* 165 */
+#define TEGRA124_CLK_I2C6 166
+/* 167 */
+/* 168 */
+/* 169 */
+/* 170 */
+#define TEGRA124_CLK_VIM2_CLK 171
+/* 172 */
+/* 173 */
+/* 174 */
+/* 175 */
+#define TEGRA124_CLK_HDMI_AUDIO 176
+#define TEGRA124_CLK_CLK72MHZ 177
+#define TEGRA124_CLK_VIC03 178
+/* 179 */
+#define TEGRA124_CLK_ADX1 180
+#define TEGRA124_CLK_DPAUX 181
+#define TEGRA124_CLK_SOR0 182
+/* 183 */
+#define TEGRA124_CLK_GPU 184
+#define TEGRA124_CLK_AMX1 185
+/* 186 */
+/* 187 */
+/* 188 */
+/* 189 */
+/* 190 */
+/* 191 */
+#define TEGRA124_CLK_UARTB 192
+#define TEGRA124_CLK_VFIR 193
+#define TEGRA124_CLK_SPDIF_IN 194
+#define TEGRA124_CLK_SPDIF_OUT 195
+#define TEGRA124_CLK_VI 196
+#define TEGRA124_CLK_VI_SENSOR 197
+#define TEGRA124_CLK_FUSE 198
+#define TEGRA124_CLK_FUSE_BURN 199
+#define TEGRA124_CLK_CLK_32K 200
+#define TEGRA124_CLK_CLK_M 201
+#define TEGRA124_CLK_CLK_M_DIV2 202
+#define TEGRA124_CLK_CLK_M_DIV4 203
+#define TEGRA124_CLK_PLL_REF 204
+#define TEGRA124_CLK_PLL_C 205
+#define TEGRA124_CLK_PLL_C_OUT1 206
+#define TEGRA124_CLK_PLL_C2 207
+#define TEGRA124_CLK_PLL_C3 208
+#define TEGRA124_CLK_PLL_M 209
+#define TEGRA124_CLK_PLL_M_OUT1 210
+#define TEGRA124_CLK_PLL_P 211
+#define TEGRA124_CLK_PLL_P_OUT1 212
+#define TEGRA124_CLK_PLL_P_OUT2 213
+#define TEGRA124_CLK_PLL_P_OUT3 214
+#define TEGRA124_CLK_PLL_P_OUT4 215
+#define TEGRA124_CLK_PLL_A 216
+#define TEGRA124_CLK_PLL_A_OUT0 217
+#define TEGRA124_CLK_PLL_D 218
+#define TEGRA124_CLK_PLL_D_OUT0 219
+#define TEGRA124_CLK_PLL_D2 220
+#define TEGRA124_CLK_PLL_D2_OUT0 221
+#define TEGRA124_CLK_PLL_U 222
+#define TEGRA124_CLK_PLL_U_480M 223
+
+#define TEGRA124_CLK_PLL_U_60M 224
+#define TEGRA124_CLK_PLL_U_48M 225
+#define TEGRA124_CLK_PLL_U_12M 226
+/* 227 */
+/* 228 */
+#define TEGRA124_CLK_PLL_RE_VCO 229
+#define TEGRA124_CLK_PLL_RE_OUT 230
+#define TEGRA124_CLK_PLL_E 231
+#define TEGRA124_CLK_SPDIF_IN_SYNC 232
+#define TEGRA124_CLK_I2S0_SYNC 233
+#define TEGRA124_CLK_I2S1_SYNC 234
+#define TEGRA124_CLK_I2S2_SYNC 235
+#define TEGRA124_CLK_I2S3_SYNC 236
+#define TEGRA124_CLK_I2S4_SYNC 237
+#define TEGRA124_CLK_VIMCLK_SYNC 238
+#define TEGRA124_CLK_AUDIO0 239
+#define TEGRA124_CLK_AUDIO1 240
+#define TEGRA124_CLK_AUDIO2 241
+#define TEGRA124_CLK_AUDIO3 242
+#define TEGRA124_CLK_AUDIO4 243
+#define TEGRA124_CLK_SPDIF 244
+#define TEGRA124_CLK_CLK_OUT_1 245
+#define TEGRA124_CLK_CLK_OUT_2 246
+#define TEGRA124_CLK_CLK_OUT_3 247
+#define TEGRA124_CLK_BLINK 248
+/* 249 */
+/* 250 */
+/* 251 */
+#define TEGRA124_CLK_XUSB_HOST_SRC 252
+#define TEGRA124_CLK_XUSB_FALCON_SRC 253
+#define TEGRA124_CLK_XUSB_FS_SRC 254
+#define TEGRA124_CLK_XUSB_SS_SRC 255
+
+#define TEGRA124_CLK_XUSB_DEV_SRC 256
+#define TEGRA124_CLK_XUSB_DEV 257
+#define TEGRA124_CLK_XUSB_HS_SRC 258
+#define TEGRA124_CLK_SCLK 259
+#define TEGRA124_CLK_HCLK 260
+#define TEGRA124_CLK_PCLK 261
+/* 262 */
+/* 263 */
+#define TEGRA124_CLK_DFLL_REF 264
+#define TEGRA124_CLK_DFLL_SOC 265
+#define TEGRA124_CLK_VI_SENSOR2 266
+#define TEGRA124_CLK_PLL_P_OUT5 267
+#define TEGRA124_CLK_CML0 268
+#define TEGRA124_CLK_CML1 269
+#define TEGRA124_CLK_PLL_C4 270
+#define TEGRA124_CLK_PLL_DP 271
+#define TEGRA124_CLK_PLL_E_MUX 272
+#define TEGRA124_CLK_PLLD_DSI 273
+/* 274 */
+/* 275 */
+/* 276 */
+/* 277 */
+/* 278 */
+/* 279 */
+/* 280 */
+/* 281 */
+/* 282 */
+/* 283 */
+/* 284 */
+/* 285 */
+/* 286 */
+/* 287 */
+
+/* 288 */
+/* 289 */
+/* 290 */
+/* 291 */
+/* 292 */
+/* 293 */
+/* 294 */
+/* 295 */
+/* 296 */
+/* 297 */
+/* 298 */
+/* 299 */
+#define TEGRA124_CLK_AUDIO0_MUX 300
+#define TEGRA124_CLK_AUDIO1_MUX 301
+#define TEGRA124_CLK_AUDIO2_MUX 302
+#define TEGRA124_CLK_AUDIO3_MUX 303
+#define TEGRA124_CLK_AUDIO4_MUX 304
+#define TEGRA124_CLK_SPDIF_MUX 305
+#define TEGRA124_CLK_CLK_OUT_1_MUX 306
+#define TEGRA124_CLK_CLK_OUT_2_MUX 307
+#define TEGRA124_CLK_CLK_OUT_3_MUX 308
+/* 309 */
+/* 310 */
+#define TEGRA124_CLK_SOR0_LVDS 311
+#define TEGRA124_CLK_XUSB_SS_DIV2 312
+
+#define TEGRA124_CLK_PLL_M_UD 313
+#define TEGRA124_CLK_PLL_C_UD 314
+
+#endif	/* _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H */
diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h
index af9bc9a..2860737 100644
--- a/include/dt-bindings/clock/tegra124-car.h
+++ b/include/dt-bindings/clock/tegra124-car.h
@@ -1,346 +1,19 @@
 /*
- * This header provides constants for binding nvidia,tegra124-car.
- *
- * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
- * registers. These IDs often match those in the CAR's RST_DEVICES registers,
- * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
- * this case, those clocks are assigned IDs above 185 in order to highlight
- * this issue. Implementations that interpret these clock IDs as bit values
- * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
- * explicitly handle these special cases.
- *
- * The balance of the clocks controlled by the CAR are assigned IDs of 185 and
- * above.
+ * This header provides Tegra124-specific constants for binding
+ * nvidia,tegra124-car.
  */
 
+#include <dt-bindings/clock/tegra124-car-common.h>
+
 #ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
 #define _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
 
-/* 0 */
-/* 1 */
-/* 2 */
-#define TEGRA124_CLK_ISPB 3
-#define TEGRA124_CLK_RTC 4
-#define TEGRA124_CLK_TIMER 5
-#define TEGRA124_CLK_UARTA 6
-/* 7 (register bit affects uartb and vfir) */
-/* 8 */
-#define TEGRA124_CLK_SDMMC2 9
-/* 10 (register bit affects spdif_in and spdif_out) */
-#define TEGRA124_CLK_I2S1 11
-#define TEGRA124_CLK_I2C1 12
-/* 13 */
-#define TEGRA124_CLK_SDMMC1 14
-#define TEGRA124_CLK_SDMMC4 15
-/* 16 */
-#define TEGRA124_CLK_PWM 17
-#define TEGRA124_CLK_I2S2 18
-/* 20 (register bit affects vi and vi_sensor) */
-/* 21 */
-#define TEGRA124_CLK_USBD 22
-#define TEGRA124_CLK_ISP 23
-/* 26 */
-/* 25 */
-#define TEGRA124_CLK_DISP2 26
-#define TEGRA124_CLK_DISP1 27
-#define TEGRA124_CLK_HOST1X 28
-#define TEGRA124_CLK_VCP 29
-#define TEGRA124_CLK_I2S0 30
-/* 31 */
+#define TEGRA124_CLK_PLL_X		227
+#define TEGRA124_CLK_PLL_X_OUT0		228
 
-#define TEGRA124_CLK_MC 32
-/* 33 */
-#define TEGRA124_CLK_APBDMA 34
-/* 35 */
-#define TEGRA124_CLK_KBC 36
-/* 37 */
-/* 38 */
-/* 39 (register bit affects fuse and fuse_burn) */
-#define TEGRA124_CLK_KFUSE 40
-#define TEGRA124_CLK_SBC1 41
-#define TEGRA124_CLK_NOR 42
-/* 43 */
-#define TEGRA124_CLK_SBC2 44
-/* 45 */
-#define TEGRA124_CLK_SBC3 46
-#define TEGRA124_CLK_I2C5 47
-#define TEGRA124_CLK_DSIA 48
-/* 49 */
-#define TEGRA124_CLK_MIPI 50
-#define TEGRA124_CLK_HDMI 51
-#define TEGRA124_CLK_CSI 52
-/* 53 */
-#define TEGRA124_CLK_I2C2 54
-#define TEGRA124_CLK_UARTC 55
-#define TEGRA124_CLK_MIPI_CAL 56
-#define TEGRA124_CLK_EMC 57
-#define TEGRA124_CLK_USB2 58
-#define TEGRA124_CLK_USB3 59
-/* 60 */
-#define TEGRA124_CLK_VDE 61
-#define TEGRA124_CLK_BSEA 62
-#define TEGRA124_CLK_BSEV 63
+#define TEGRA124_CLK_CCLK_G		262
+#define TEGRA124_CLK_CCLK_LP		263
 
-/* 64 */
-#define TEGRA124_CLK_UARTD 65
-/* 66 */
-#define TEGRA124_CLK_I2C3 67
-#define TEGRA124_CLK_SBC4 68
-#define TEGRA124_CLK_SDMMC3 69
-#define TEGRA124_CLK_PCIE 70
-#define TEGRA124_CLK_OWR 71
-#define TEGRA124_CLK_AFI 72
-#define TEGRA124_CLK_CSITE 73
-/* 74 */
-/* 75 */
-#define TEGRA124_CLK_LA 76
-#define TEGRA124_CLK_TRACE 77
-#define TEGRA124_CLK_SOC_THERM 78
-#define TEGRA124_CLK_DTV 79
-/* 80 */
-#define TEGRA124_CLK_I2CSLOW 81
-#define TEGRA124_CLK_DSIB 82
-#define TEGRA124_CLK_TSEC 83
-/* 84 */
-/* 85 */
-/* 86 */
-/* 87 */
-/* 88 */
-#define TEGRA124_CLK_XUSB_HOST 89
-/* 90 */
-#define TEGRA124_CLK_MSENC 91
-#define TEGRA124_CLK_CSUS 92
-/* 93 */
-/* 94 */
-/* 95 (bit affects xusb_dev and xusb_dev_src) */
-
-/* 96 */
-/* 97 */
-/* 98 */
-#define TEGRA124_CLK_MSELECT 99
-#define TEGRA124_CLK_TSENSOR 100
-#define TEGRA124_CLK_I2S3 101
-#define TEGRA124_CLK_I2S4 102
-#define TEGRA124_CLK_I2C4 103
-#define TEGRA124_CLK_SBC5 104
-#define TEGRA124_CLK_SBC6 105
-#define TEGRA124_CLK_D_AUDIO 106
-#define TEGRA124_CLK_APBIF 107
-#define TEGRA124_CLK_DAM0 108
-#define TEGRA124_CLK_DAM1 109
-#define TEGRA124_CLK_DAM2 110
-#define TEGRA124_CLK_HDA2CODEC_2X 111
-/* 112 */
-#define TEGRA124_CLK_AUDIO0_2X 113
-#define TEGRA124_CLK_AUDIO1_2X 114
-#define TEGRA124_CLK_AUDIO2_2X 115
-#define TEGRA124_CLK_AUDIO3_2X 116
-#define TEGRA124_CLK_AUDIO4_2X 117
-#define TEGRA124_CLK_SPDIF_2X 118
-#define TEGRA124_CLK_ACTMON 119
-#define TEGRA124_CLK_EXTERN1 120
-#define TEGRA124_CLK_EXTERN2 121
-#define TEGRA124_CLK_EXTERN3 122
-#define TEGRA124_CLK_SATA_OOB 123
-#define TEGRA124_CLK_SATA 124
-#define TEGRA124_CLK_HDA 125
-/* 126 */
-#define TEGRA124_CLK_SE 127
-
-#define TEGRA124_CLK_HDA2HDMI 128
-#define TEGRA124_CLK_SATA_COLD 129
-/* 130 */
-/* 131 */
-/* 132 */
-/* 133 */
-/* 134 */
-/* 135 */
-/* 136 */
-/* 137 */
-/* 138 */
-/* 139 */
-/* 140 */
-/* 141 */
-/* 142 */
-/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
-/*      xusb_host_src and xusb_ss_src) */
-#define TEGRA124_CLK_CILAB 144
-#define TEGRA124_CLK_CILCD 145
-#define TEGRA124_CLK_CILE 146
-#define TEGRA124_CLK_DSIALP 147
-#define TEGRA124_CLK_DSIBLP 148
-#define TEGRA124_CLK_ENTROPY 149
-#define TEGRA124_CLK_DDS 150
-/* 151 */
-#define TEGRA124_CLK_DP2 152
-#define TEGRA124_CLK_AMX 153
-#define TEGRA124_CLK_ADX 154
-/* 155 (bit affects dfll_ref and dfll_soc) */
-#define TEGRA124_CLK_XUSB_SS 156
-/* 157 */
-/* 158 */
-/* 159 */
-
-/* 160 */
-/* 161 */
-/* 162 */
-/* 163 */
-/* 164 */
-/* 165 */
-#define TEGRA124_CLK_I2C6 166
-/* 167 */
-/* 168 */
-/* 169 */
-/* 170 */
-#define TEGRA124_CLK_VIM2_CLK 171
-/* 172 */
-/* 173 */
-/* 174 */
-/* 175 */
-#define TEGRA124_CLK_HDMI_AUDIO 176
-#define TEGRA124_CLK_CLK72MHZ 177
-#define TEGRA124_CLK_VIC03 178
-/* 179 */
-#define TEGRA124_CLK_ADX1 180
-#define TEGRA124_CLK_DPAUX 181
-#define TEGRA124_CLK_SOR0 182
-/* 183 */
-#define TEGRA124_CLK_GPU 184
-#define TEGRA124_CLK_AMX1 185
-/* 186 */
-/* 187 */
-/* 188 */
-/* 189 */
-/* 190 */
-/* 191 */
-#define TEGRA124_CLK_UARTB 192
-#define TEGRA124_CLK_VFIR 193
-#define TEGRA124_CLK_SPDIF_IN 194
-#define TEGRA124_CLK_SPDIF_OUT 195
-#define TEGRA124_CLK_VI 196
-#define TEGRA124_CLK_VI_SENSOR 197
-#define TEGRA124_CLK_FUSE 198
-#define TEGRA124_CLK_FUSE_BURN 199
-#define TEGRA124_CLK_CLK_32K 200
-#define TEGRA124_CLK_CLK_M 201
-#define TEGRA124_CLK_CLK_M_DIV2 202
-#define TEGRA124_CLK_CLK_M_DIV4 203
-#define TEGRA124_CLK_PLL_REF 204
-#define TEGRA124_CLK_PLL_C 205
-#define TEGRA124_CLK_PLL_C_OUT1 206
-#define TEGRA124_CLK_PLL_C2 207
-#define TEGRA124_CLK_PLL_C3 208
-#define TEGRA124_CLK_PLL_M 209
-#define TEGRA124_CLK_PLL_M_OUT1 210
-#define TEGRA124_CLK_PLL_P 211
-#define TEGRA124_CLK_PLL_P_OUT1 212
-#define TEGRA124_CLK_PLL_P_OUT2 213
-#define TEGRA124_CLK_PLL_P_OUT3 214
-#define TEGRA124_CLK_PLL_P_OUT4 215
-#define TEGRA124_CLK_PLL_A 216
-#define TEGRA124_CLK_PLL_A_OUT0 217
-#define TEGRA124_CLK_PLL_D 218
-#define TEGRA124_CLK_PLL_D_OUT0 219
-#define TEGRA124_CLK_PLL_D2 220
-#define TEGRA124_CLK_PLL_D2_OUT0 221
-#define TEGRA124_CLK_PLL_U 222
-#define TEGRA124_CLK_PLL_U_480M 223
-
-#define TEGRA124_CLK_PLL_U_60M 224
-#define TEGRA124_CLK_PLL_U_48M 225
-#define TEGRA124_CLK_PLL_U_12M 226
-#define TEGRA124_CLK_PLL_X 227
-#define TEGRA124_CLK_PLL_X_OUT0 228
-#define TEGRA124_CLK_PLL_RE_VCO 229
-#define TEGRA124_CLK_PLL_RE_OUT 230
-#define TEGRA124_CLK_PLL_E 231
-#define TEGRA124_CLK_SPDIF_IN_SYNC 232
-#define TEGRA124_CLK_I2S0_SYNC 233
-#define TEGRA124_CLK_I2S1_SYNC 234
-#define TEGRA124_CLK_I2S2_SYNC 235
-#define TEGRA124_CLK_I2S3_SYNC 236
-#define TEGRA124_CLK_I2S4_SYNC 237
-#define TEGRA124_CLK_VIMCLK_SYNC 238
-#define TEGRA124_CLK_AUDIO0 239
-#define TEGRA124_CLK_AUDIO1 240
-#define TEGRA124_CLK_AUDIO2 241
-#define TEGRA124_CLK_AUDIO3 242
-#define TEGRA124_CLK_AUDIO4 243
-#define TEGRA124_CLK_SPDIF 244
-#define TEGRA124_CLK_CLK_OUT_1 245
-#define TEGRA124_CLK_CLK_OUT_2 246
-#define TEGRA124_CLK_CLK_OUT_3 247
-#define TEGRA124_CLK_BLINK 248
-/* 249 */
-/* 250 */
-/* 251 */
-#define TEGRA124_CLK_XUSB_HOST_SRC 252
-#define TEGRA124_CLK_XUSB_FALCON_SRC 253
-#define TEGRA124_CLK_XUSB_FS_SRC 254
-#define TEGRA124_CLK_XUSB_SS_SRC 255
-
-#define TEGRA124_CLK_XUSB_DEV_SRC 256
-#define TEGRA124_CLK_XUSB_DEV 257
-#define TEGRA124_CLK_XUSB_HS_SRC 258
-#define TEGRA124_CLK_SCLK 259
-#define TEGRA124_CLK_HCLK 260
-#define TEGRA124_CLK_PCLK 261
-#define TEGRA124_CLK_CCLK_G 262
-#define TEGRA124_CLK_CCLK_LP 263
-#define TEGRA124_CLK_DFLL_REF 264
-#define TEGRA124_CLK_DFLL_SOC 265
-#define TEGRA124_CLK_VI_SENSOR2 266
-#define TEGRA124_CLK_PLL_P_OUT5 267
-#define TEGRA124_CLK_CML0 268
-#define TEGRA124_CLK_CML1 269
-#define TEGRA124_CLK_PLL_C4 270
-#define TEGRA124_CLK_PLL_DP 271
-#define TEGRA124_CLK_PLL_E_MUX 272
-/* 273 */
-/* 274 */
-/* 275 */
-/* 276 */
-/* 277 */
-/* 278 */
-/* 279 */
-/* 280 */
-/* 281 */
-/* 282 */
-/* 283 */
-/* 284 */
-/* 285 */
-/* 286 */
-/* 287 */
-
-/* 288 */
-/* 289 */
-/* 290 */
-/* 291 */
-/* 292 */
-/* 293 */
-/* 294 */
-/* 295 */
-/* 296 */
-/* 297 */
-/* 298 */
-/* 299 */
-#define TEGRA124_CLK_AUDIO0_MUX 300
-#define TEGRA124_CLK_AUDIO1_MUX 301
-#define TEGRA124_CLK_AUDIO2_MUX 302
-#define TEGRA124_CLK_AUDIO3_MUX 303
-#define TEGRA124_CLK_AUDIO4_MUX 304
-#define TEGRA124_CLK_SPDIF_MUX 305
-#define TEGRA124_CLK_CLK_OUT_1_MUX 306
-#define TEGRA124_CLK_CLK_OUT_2_MUX 307
-#define TEGRA124_CLK_CLK_OUT_3_MUX 308
-#define TEGRA124_CLK_DSIA_MUX 309
-#define TEGRA124_CLK_DSIB_MUX 310
-#define TEGRA124_CLK_SOR0_LVDS 311
-#define TEGRA124_CLK_XUSB_SS_DIV2 312
-
-#define TEGRA124_CLK_PLL_M_UD 313
-#define TEGRA124_CLK_PLL_C_UD 314
-
-#define TEGRA124_CLK_CLK_MAX 315
+#define TEGRA124_CLK_CLK_MAX		315
 
 #endif	/* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h
index 2fbc804..226f772 100644
--- a/include/dt-bindings/pinctrl/am33xx.h
+++ b/include/dt-bindings/pinctrl/am33xx.h
@@ -13,7 +13,8 @@
 
 #define PULL_DISABLE		(1 << 3)
 #define INPUT_EN		(1 << 5)
-#define SLEWCTRL_FAST		(1 << 6)
+#define SLEWCTRL_SLOW		(1 << 6)
+#define SLEWCTRL_FAST		0
 
 /* update macro depending on INPUT_EN and PULL_ENA */
 #undef PIN_OUTPUT
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
index 9c2e4f8..5f4d0189 100644
--- a/include/dt-bindings/pinctrl/am43xx.h
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -18,7 +18,8 @@
 #define PULL_DISABLE		(1 << 16)
 #define PULL_UP			(1 << 17)
 #define INPUT_EN		(1 << 18)
-#define SLEWCTRL_FAST		(1 << 19)
+#define SLEWCTRL_SLOW		(1 << 19)
+#define SLEWCTRL_FAST		0
 #define DS0_PULL_UP_DOWN_EN	(1 << 27)
 
 #define PIN_OUTPUT		(PULL_DISABLE)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 7c55dd5..66203b2 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -114,6 +114,7 @@
 	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
 	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
 	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
+	void	(*clear_eisr)(struct kvm_vcpu *vcpu);
 	u32	(*get_interrupt_status)(const struct kvm_vcpu *vcpu);
 	void	(*enable_underflow)(struct kvm_vcpu *vcpu);
 	void	(*disable_underflow)(struct kvm_vcpu *vcpu);
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
deleted file mode 100644
index 0ca5f60..0000000
--- a/include/linux/clk-private.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- *  linux/include/linux/clk-private.h
- *
- *  Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
- *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __LINUX_CLK_PRIVATE_H
-#define __LINUX_CLK_PRIVATE_H
-
-#include <linux/clk-provider.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-
-/*
- * WARNING: Do not include clk-private.h from any file that implements struct
- * clk_ops.  Doing so is a layering violation!
- *
- * This header exists only to allow for statically initialized clock data.  Any
- * static clock data must be defined in a separate file from the logic that
- * implements the clock operations for that same data.
- */
-
-#ifdef CONFIG_COMMON_CLK
-
-struct module;
-
-struct clk {
-	const char		*name;
-	const struct clk_ops	*ops;
-	struct clk_hw		*hw;
-	struct module		*owner;
-	struct clk		*parent;
-	const char		**parent_names;
-	struct clk		**parents;
-	u8			num_parents;
-	u8			new_parent_index;
-	unsigned long		rate;
-	unsigned long		new_rate;
-	struct clk		*new_parent;
-	struct clk		*new_child;
-	unsigned long		flags;
-	unsigned int		enable_count;
-	unsigned int		prepare_count;
-	unsigned long		accuracy;
-	int			phase;
-	struct hlist_head	children;
-	struct hlist_node	child_node;
-	struct hlist_node	debug_node;
-	unsigned int		notifier_count;
-#ifdef CONFIG_DEBUG_FS
-	struct dentry		*dentry;
-#endif
-	struct kref		ref;
-};
-
-/*
- * DOC: Basic clock implementations common to many platforms
- *
- * Each basic clock hardware type is comprised of a structure describing the
- * clock hardware, implementations of the relevant callbacks in struct clk_ops,
- * unique flags for that hardware type, a registration function and an
- * alternative macro for static initialization
- */
-
-#define DEFINE_CLK(_name, _ops, _flags, _parent_names,		\
-		_parents)					\
-	static struct clk _name = {				\
-		.name = #_name,					\
-		.ops = &_ops,					\
-		.hw = &_name##_hw.hw,				\
-		.parent_names = _parent_names,			\
-		.num_parents = ARRAY_SIZE(_parent_names),	\
-		.parents = _parents,				\
-		.flags = _flags | CLK_IS_BASIC,			\
-	}
-
-#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate,		\
-				_fixed_rate_flags)		\
-	static struct clk _name;				\
-	static const char *_name##_parent_names[] = {};		\
-	static struct clk_fixed_rate _name##_hw = {		\
-		.hw = {						\
-			.clk = &_name,				\
-		},						\
-		.fixed_rate = _rate,				\
-		.flags = _fixed_rate_flags,			\
-	};							\
-	DEFINE_CLK(_name, clk_fixed_rate_ops, _flags,		\
-			_name##_parent_names, NULL);
-
-#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr,	\
-				_flags, _reg, _bit_idx,		\
-				_gate_flags, _lock)		\
-	static struct clk _name;				\
-	static const char *_name##_parent_names[] = {		\
-		_parent_name,					\
-	};							\
-	static struct clk *_name##_parents[] = {		\
-		_parent_ptr,					\
-	};							\
-	static struct clk_gate _name##_hw = {			\
-		.hw = {						\
-			.clk = &_name,				\
-		},						\
-		.reg = _reg,					\
-		.bit_idx = _bit_idx,				\
-		.flags = _gate_flags,				\
-		.lock = _lock,					\
-	};							\
-	DEFINE_CLK(_name, clk_gate_ops, _flags,			\
-			_name##_parent_names, _name##_parents);
-
-#define _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,	\
-				_flags, _reg, _shift, _width,	\
-				_divider_flags, _table, _lock)	\
-	static struct clk _name;				\
-	static const char *_name##_parent_names[] = {		\
-		_parent_name,					\
-	};							\
-	static struct clk *_name##_parents[] = {		\
-		_parent_ptr,					\
-	};							\
-	static struct clk_divider _name##_hw = {		\
-		.hw = {						\
-			.clk = &_name,				\
-		},						\
-		.reg = _reg,					\
-		.shift = _shift,				\
-		.width = _width,				\
-		.flags = _divider_flags,			\
-		.table = _table,				\
-		.lock = _lock,					\
-	};							\
-	DEFINE_CLK(_name, clk_divider_ops, _flags,		\
-			_name##_parent_names, _name##_parents);
-
-#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,	\
-				_flags, _reg, _shift, _width,	\
-				_divider_flags, _lock)		\
-	_DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,	\
-				_flags, _reg, _shift, _width,	\
-				_divider_flags, NULL, _lock)
-
-#define DEFINE_CLK_DIVIDER_TABLE(_name, _parent_name,		\
-				_parent_ptr, _flags, _reg,	\
-				_shift, _width, _divider_flags,	\
-				_table, _lock)			\
-	_DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,	\
-				_flags, _reg, _shift, _width,	\
-				_divider_flags, _table, _lock)	\
-
-#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags,	\
-				_reg, _shift, _width,		\
-				_mux_flags, _lock)		\
-	static struct clk _name;				\
-	static struct clk_mux _name##_hw = {			\
-		.hw = {						\
-			.clk = &_name,				\
-		},						\
-		.reg = _reg,					\
-		.shift = _shift,				\
-		.mask = BIT(_width) - 1,			\
-		.flags = _mux_flags,				\
-		.lock = _lock,					\
-	};							\
-	DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names,	\
-			_parents);
-
-#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name,		\
-				_parent_ptr, _flags,		\
-				_mult, _div)			\
-	static struct clk _name;				\
-	static const char *_name##_parent_names[] = {		\
-		_parent_name,					\
-	};							\
-	static struct clk *_name##_parents[] = {		\
-		_parent_ptr,					\
-	};							\
-	static struct clk_fixed_factor _name##_hw = {		\
-		.hw = {						\
-			.clk = &_name,				\
-		},						\
-		.mult = _mult,					\
-		.div = _div,					\
-	};							\
-	DEFINE_CLK(_name, clk_fixed_factor_ops, _flags,		\
-			_name##_parent_names, _name##_parents);
-
-/**
- * __clk_init - initialize the data structures in a struct clk
- * @dev:	device initializing this clk, placeholder for now
- * @clk:	clk being initialized
- *
- * Initializes the lists in struct clk, queries the hardware for the
- * parent and rate and sets them both.
- *
- * Any struct clk passed into __clk_init must have the following members
- * populated:
- * 	.name
- * 	.ops
- * 	.hw
- * 	.parent_names
- * 	.num_parents
- * 	.flags
- *
- * It is not necessary to call clk_register if __clk_init is used directly with
- * statically initialized clock data.
- *
- * Returns 0 on success, otherwise an error code.
- */
-int __clk_init(struct device *dev, struct clk *clk);
-
-struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
-
-#endif /* CONFIG_COMMON_CLK */
-#endif /* CLK_PRIVATE_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index d936409..5591ea7 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -33,6 +33,7 @@
 #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
 
 struct clk_hw;
+struct clk_core;
 struct dentry;
 
 /**
@@ -174,9 +175,12 @@
 					unsigned long parent_rate);
 	long		(*round_rate)(struct clk_hw *hw, unsigned long rate,
 					unsigned long *parent_rate);
-	long		(*determine_rate)(struct clk_hw *hw, unsigned long rate,
-					unsigned long *best_parent_rate,
-					struct clk_hw **best_parent_hw);
+	long		(*determine_rate)(struct clk_hw *hw,
+					  unsigned long rate,
+					  unsigned long min_rate,
+					  unsigned long max_rate,
+					  unsigned long *best_parent_rate,
+					  struct clk_hw **best_parent_hw);
 	int		(*set_parent)(struct clk_hw *hw, u8 index);
 	u8		(*get_parent)(struct clk_hw *hw);
 	int		(*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -216,13 +220,17 @@
  * clk_foo and then referenced by the struct clk instance that uses struct
  * clk_foo's clk_ops
  *
- * @clk: pointer to the struct clk instance that points back to this struct
- * clk_hw instance
+ * @core: pointer to the struct clk_core instance that points back to this
+ * struct clk_hw instance
+ *
+ * @clk: pointer to the per-user struct clk instance that can be used to call
+ * into the clk API
  *
  * @init: pointer to struct clk_init_data that contains the init data shared
  * with the common clock framework.
  */
 struct clk_hw {
+	struct clk_core *core;
 	struct clk *clk;
 	const struct clk_init_data *init;
 };
@@ -294,6 +302,7 @@
 		const char *parent_name, unsigned long flags,
 		void __iomem *reg, u8 bit_idx,
 		u8 clk_gate_flags, spinlock_t *lock);
+void clk_unregister_gate(struct clk *clk);
 
 struct clk_div_table {
 	unsigned int	val;
@@ -352,6 +361,17 @@
 #define CLK_DIVIDER_READ_ONLY		BIT(5)
 
 extern const struct clk_ops clk_divider_ops;
+
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+		unsigned int val, const struct clk_div_table *table,
+		unsigned long flags);
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long *prate, const struct clk_div_table *table,
+		u8 width, unsigned long flags);
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+		const struct clk_div_table *table, u8 width,
+		unsigned long flags);
+
 struct clk *clk_register_divider(struct device *dev, const char *name,
 		const char *parent_name, unsigned long flags,
 		void __iomem *reg, u8 shift, u8 width,
@@ -361,6 +381,7 @@
 		void __iomem *reg, u8 shift, u8 width,
 		u8 clk_divider_flags, const struct clk_div_table *table,
 		spinlock_t *lock);
+void clk_unregister_divider(struct clk *clk);
 
 /**
  * struct clk_mux - multiplexer clock
@@ -382,6 +403,8 @@
  *	register, and mask of mux bits are in higher 16-bit of this register.
  *	While setting the mux bits, higher 16-bit should also be updated to
  *	indicate changing mux bits.
+ * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
+ *	frequency.
  */
 struct clk_mux {
 	struct clk_hw	hw;
@@ -396,7 +419,8 @@
 #define CLK_MUX_INDEX_ONE		BIT(0)
 #define CLK_MUX_INDEX_BIT		BIT(1)
 #define CLK_MUX_HIWORD_MASK		BIT(2)
-#define CLK_MUX_READ_ONLY	BIT(3) /* mux setting cannot be changed */
+#define CLK_MUX_READ_ONLY		BIT(3) /* mux can't be changed */
+#define CLK_MUX_ROUND_CLOSEST		BIT(4)
 
 extern const struct clk_ops clk_mux_ops;
 extern const struct clk_ops clk_mux_ro_ops;
@@ -411,6 +435,8 @@
 		void __iomem *reg, u8 shift, u32 mask,
 		u8 clk_mux_flags, u32 *table, spinlock_t *lock);
 
+void clk_unregister_mux(struct clk *clk);
+
 void of_fixed_factor_clk_setup(struct device_node *node);
 
 /**
@@ -550,15 +576,29 @@
 bool __clk_is_enabled(struct clk *clk);
 struct clk *__clk_lookup(const char *name);
 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+			      unsigned long min_rate,
+			      unsigned long max_rate,
 			      unsigned long *best_parent_rate,
 			      struct clk_hw **best_parent_p);
+unsigned long __clk_determine_rate(struct clk_hw *core,
+				   unsigned long rate,
+				   unsigned long min_rate,
+				   unsigned long max_rate);
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+			      unsigned long min_rate,
+			      unsigned long max_rate,
+			      unsigned long *best_parent_rate,
+			      struct clk_hw **best_parent_p);
+
+static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
+{
+	dst->clk = src->clk;
+	dst->core = src->core;
+}
 
 /*
  * FIXME clock api without lock protection
  */
-int __clk_prepare(struct clk *clk);
-void __clk_unprepare(struct clk *clk);
-void __clk_reparent(struct clk *clk, struct clk *new_parent);
 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
 
 struct of_device_id;
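
Note on the ->determine_rate() change above: callbacks now receive an explicit min_rate/max_rate window in addition to the requested rate. A minimal sketch of a divider-style callback adapted to the new prototype could look like the following; my_div_determine_rate, the NULL divider table and the 8-bit width are illustrative assumptions, not code from any in-tree driver.

static long my_div_determine_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long min_rate,
				  unsigned long max_rate,
				  unsigned long *best_parent_rate,
				  struct clk_hw **best_parent_hw)
{
	/* Honour the caller-supplied rate window before rounding. */
	rate = clamp(rate, min_rate, max_rate);

	/* Let the generic divider helper pick the closest reachable rate. */
	return divider_round_rate(hw, rate, best_parent_rate, NULL, 8, 0);
}

divider_round_rate() is one of the helpers exported just above; clamp() is the usual macro from linux/kernel.h.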
diff --git a/include/linux/clk.h b/include/linux/clk.h
index c7f258a..68c16a6 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -125,6 +125,19 @@
  */
 int clk_get_phase(struct clk *clk);
 
+/**
+ * clk_is_match - check if two clks point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q);
+
 #else
 
 static inline long clk_get_accuracy(struct clk *clk)
@@ -142,6 +155,11 @@
 	return -ENOTSUPP;
 }
 
+static inline bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+	return p == q;
+}
+
 #endif
 
 /**
@@ -302,6 +320,46 @@
 int clk_set_rate(struct clk *clk, unsigned long rate);
 
 /**
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
+ *
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
+ *
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
+ */
+bool clk_has_parent(struct clk *clk, struct clk *parent);
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
+/**
  * clk_set_parent - set the parent clock source for this clock
  * @clk: clock source
  * @parent: parent clock source
@@ -374,6 +432,11 @@
 	return 0;
 }
 
+static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+	return true;
+}
+
 static inline int clk_set_parent(struct clk *clk, struct clk *parent)
 {
 	return 0;
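
The new consumer-side calls are easiest to see together. A hedged probe-time sketch follows; my_probe, the clock names "bus"/"core" and the 100-200 MHz window are made up for illustration.

static int my_probe(struct device *dev)
{
	struct clk *bus = devm_clk_get(dev, "bus");
	struct clk *core = devm_clk_get(dev, "core");

	if (IS_ERR(bus) || IS_ERR(core))
		return -EPROBE_DEFER;

	/*
	 * struct clk handles are per user now, so two handles for the same
	 * hardware clock need not be pointer-equal; compare with
	 * clk_is_match() instead.
	 */
	if (clk_is_match(bus, core))
		dev_info(dev, "bus and core share one clock\n");

	/* Express a constraint rather than forcing one exact rate. */
	return clk_set_rate_range(bus, 100000000, 200000000);
}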
diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h
deleted file mode 100644
index aed28c4..0000000
--- a/include/linux/clk/sunxi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2013 - Hans de Goede <hdegoede@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_CLK_SUNXI_H_
-#define __LINUX_CLK_SUNXI_H_
-
-#include <linux/clk.h>
-
-void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output);
-
-#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 3ca9fca..19c4208 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -120,6 +120,4 @@
 }
 #endif
 
-void tegra_clocks_apply_init_table(void);
-
 #endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 55ef529..6784400 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -15,6 +15,7 @@
 #ifndef __LINUX_CLK_TI_H__
 #define __LINUX_CLK_TI_H__
 
+#include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 
 /**
@@ -217,6 +218,13 @@
 /* Maximum number of clock memmaps */
 #define CLK_MAX_MEMMAPS			4
 
+/* Static memmap indices */
+enum {
+	TI_CLKM_CM = 0,
+	TI_CLKM_PRM,
+	TI_CLKM_SCRM,
+};
+
 typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
 
 /**
@@ -263,6 +271,8 @@
 					   u8 index);
 long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
 				       unsigned long rate,
+				       unsigned long min_rate,
+				       unsigned long max_rate,
 				       unsigned long *best_parent_rate,
 				       struct clk_hw **best_parent_clk);
 unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
@@ -272,6 +282,8 @@
 				    unsigned long *parent_rate);
 long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
 					unsigned long rate,
+					unsigned long min_rate,
+					unsigned long max_rate,
 					unsigned long *best_parent_rate,
 					struct clk_hw **best_parent_clk);
 u8 omap2_init_dpll_parent(struct clk_hw *hw);
@@ -348,4 +360,17 @@
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
 
+#ifdef CONFIG_ATAGS
+int omap3430_clk_legacy_init(void);
+int omap3430es1_clk_legacy_init(void);
+int omap36xx_clk_legacy_init(void);
+int am35xx_clk_legacy_init(void);
+#else
+static inline int omap3430_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; }
+static inline int am35xx_clk_legacy_init(void) { return -ENXIO; }
+#endif
+
+
 #endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d1ec10a..1b45e4a 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -202,7 +202,7 @@
 {
 }
 
-static __always_inline void __read_once_size(volatile void *p, void *res, int size)
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
 {
 	switch (size) {
 	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
@@ -259,10 +259,10 @@
  */
 
 #define READ_ONCE(x) \
-	({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
+	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
 #define WRITE_ONCE(x, val) \
-	({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
+	({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
 
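
Because __read_once_size() now takes a const-qualified pointer and READ_ONCE() copies through a union rather than a typeof() temporary, the macro can be applied to const and small aggregate objects without casts. A rough illustration; struct pair and read_lo_once() are invented for the example.

struct pair { unsigned int lo, hi; };

static unsigned int read_lo_once(const struct pair *p)
{
	/* Both fields are loaded exactly once, with no compiler caching. */
	struct pair snap = READ_ONCE(*p);

	return snap.lo;
}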
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index f551a92..306178d 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -126,6 +126,8 @@
 
 #ifdef CONFIG_CPU_IDLE
 extern void disable_cpuidle(void);
+extern bool cpuidle_not_available(struct cpuidle_driver *drv,
+				  struct cpuidle_device *dev);
 
 extern int cpuidle_select(struct cpuidle_driver *drv,
 			  struct cpuidle_device *dev);
@@ -150,11 +152,17 @@
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
-extern void cpuidle_enter_freeze(void);
+extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+				      struct cpuidle_device *dev);
+extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
 #else
 static inline void disable_cpuidle(void) { }
+static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
+					 struct cpuidle_device *dev)
+{return true; }
 static inline int cpuidle_select(struct cpuidle_driver *drv,
 				 struct cpuidle_device *dev)
 {return -ENODEV; }
@@ -183,7 +191,12 @@
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
-static inline void cpuidle_enter_freeze(void) { }
+static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+					     struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+				       struct cpuidle_device *dev)
+{return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 	struct cpuidle_device *dev) {return NULL; }
 #endif
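
cpuidle_enter_freeze() now reports whether a state was actually entered, and cpuidle_not_available() lets the idle loop bail out before trying. A loose sketch of a caller using the new return values; the real logic lives in the scheduler's idle loop and is more involved, and idle_freeze_sketch() is not a real function.

static void idle_freeze_sketch(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (cpuidle_not_available(drv, dev)) {
		arch_cpu_idle();	/* no usable cpuidle driver */
		return;
	}

	/* A negative return means no suitable freeze state was entered. */
	if (cpuidle_enter_freeze(drv, dev) < 0)
		arch_cpu_idle();
}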
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 92c08cf..d835879 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -215,13 +215,16 @@
 #define DCACHE_LRU_LIST			0x00080000
 
 #define DCACHE_ENTRY_TYPE		0x00700000
-#define DCACHE_MISS_TYPE		0x00000000 /* Negative dentry */
-#define DCACHE_DIRECTORY_TYPE		0x00100000 /* Normal directory */
-#define DCACHE_AUTODIR_TYPE		0x00200000 /* Lookupless directory (presumed automount) */
-#define DCACHE_SYMLINK_TYPE		0x00300000 /* Symlink */
-#define DCACHE_FILE_TYPE		0x00400000 /* Other file type */
+#define DCACHE_MISS_TYPE		0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
+#define DCACHE_WHITEOUT_TYPE		0x00100000 /* Whiteout dentry (stop pathwalk) */
+#define DCACHE_DIRECTORY_TYPE		0x00200000 /* Normal directory */
+#define DCACHE_AUTODIR_TYPE		0x00300000 /* Lookupless directory (presumed automount) */
+#define DCACHE_REGULAR_TYPE		0x00400000 /* Regular file type (or fallthru to such) */
+#define DCACHE_SPECIAL_TYPE		0x00500000 /* Other file type (or fallthru to such) */
+#define DCACHE_SYMLINK_TYPE		0x00600000 /* Symlink (or fallthru to such) */
 
 #define DCACHE_MAY_FREE			0x00800000
+#define DCACHE_FALLTHRU			0x01000000 /* Fall through to lower layer */
 
 extern seqlock_t rename_lock;
 
@@ -423,6 +426,16 @@
 	return dentry->d_flags & DCACHE_ENTRY_TYPE;
 }
 
+static inline bool d_is_miss(const struct dentry *dentry)
+{
+	return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+}
+
+static inline bool d_is_whiteout(const struct dentry *dentry)
+{
+	return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE;
+}
+
 static inline bool d_can_lookup(const struct dentry *dentry)
 {
 	return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
@@ -443,14 +456,25 @@
 	return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
 }
 
+static inline bool d_is_reg(const struct dentry *dentry)
+{
+	return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE;
+}
+
+static inline bool d_is_special(const struct dentry *dentry)
+{
+	return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE;
+}
+
 static inline bool d_is_file(const struct dentry *dentry)
 {
-	return __d_entry_type(dentry) == DCACHE_FILE_TYPE;
+	return d_is_reg(dentry) || d_is_special(dentry);
 }
 
 static inline bool d_is_negative(const struct dentry *dentry)
 {
-	return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+	// TODO: check d_is_whiteout(dentry) also.
+	return d_is_miss(dentry);
 }
 
 static inline bool d_is_positive(const struct dentry *dentry)
@@ -458,10 +482,75 @@
 	return !d_is_negative(dentry);
 }
 
+extern void d_set_fallthru(struct dentry *dentry);
+
+static inline bool d_is_fallthru(const struct dentry *dentry)
+{
+	return dentry->d_flags & DCACHE_FALLTHRU;
+}
+
+
 extern int sysctl_vfs_cache_pressure;
 
 static inline unsigned long vfs_pressure_ratio(unsigned long val)
 {
 	return mult_frac(val, sysctl_vfs_cache_pressure, 100);
 }
+
+/**
+ * d_inode - Get the actual inode of this dentry
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode(const struct dentry *dentry)
+{
+	return dentry->d_inode;
+}
+
+/**
+ * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode_rcu(const struct dentry *dentry)
+{
+	return ACCESS_ONCE(dentry->d_inode);
+}
+
+/**
+ * d_backing_inode - Get upper or lower inode we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get at the inode that will be used
+ * if this dentry were to be opened as a file.  The inode may be on the upper
+ * dentry or it may be on a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own inodes.
+ */
+static inline struct inode *d_backing_inode(const struct dentry *upper)
+{
+	struct inode *inode = upper->d_inode;
+
+	return inode;
+}
+
+/**
+ * d_backing_dentry - Get upper or lower dentry we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get the dentry of the inode that
+ * will be used if this dentry were opened as a file.  It may be the upper
+ * dentry or it may be a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own dentries.
+ */
+static inline struct dentry *d_backing_dentry(struct dentry *upper)
+{
+	return upper;
+}
+
 #endif	/* __LINUX_DCACHE_H */
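
With the finer-grained entry types and the d_inode()/d_backing_inode() split, code that must cope with overlay/union dentries can ask the intended question instead of dereferencing dentry->d_inode directly. A small, purely illustrative helper; sketch_may_exec() is not a real kernel function.

static bool sketch_may_exec(const struct dentry *dentry)
{
	struct inode *inode;

	/* Regular file, or a fallthru that resolves to one. */
	if (!d_is_reg(dentry))
		return false;

	/* Upper inode, or the lower inode pinned by the upper dentry. */
	inode = d_backing_inode(dentry);

	return inode && (inode->i_mode & S_IXUGO);
}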
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 51f7cca..4173a8f 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -33,6 +33,8 @@
 * @units:		Measurement unit for this attribute.
  * @unit_expo:		Exponent used in the data.
  * @size:		Size in bytes for data size.
+ * @logical_minimum:	Logical minimum value for this attribute.
+ * @logical_maximum:	Logical maximum value for this attribute.
  */
 struct hid_sensor_hub_attribute_info {
 	u32 usage_id;
@@ -146,6 +148,7 @@
 
 /**
 * sensor_hub_input_attr_get_raw_value() - Synchronous read request
+* @hsdev:	Hub device instance.
 * @usage_id:	Attribute usage id of parent physical device as per spec
 * @attr_usage_id:	Attribute usage id as per spec
 * @report_id:	Report id to look for
@@ -160,6 +163,7 @@
 			u32 attr_usage_id, u32 report_id);
 /**
 * sensor_hub_set_feature() - Feature set request
+* @hsdev:	Hub device instance.
 * @report_id:	Report id to look for
 * @field_index:	Field index inside a report
 * @value:	Value to set
@@ -172,6 +176,7 @@
 
 /**
 * sensor_hub_get_feature() - Feature get request
+* @hsdev:	Hub device instance.
 * @report_id:	Report id to look for
 * @field_index:	Field index inside a report
 * @value:	Place holder for return value
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 7c76959..f17da50 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -130,8 +130,6 @@
  * @probe: Callback for device binding
  * @remove: Callback for device unbinding
  * @shutdown: Callback for device shutdown
- * @suspend: Callback for device suspend
- * @resume: Callback for device resume
  * @alert: Alert callback, for example for the SMBus alert protocol
  * @command: Callback for bus-wide signaling (optional)
  * @driver: Device driver model driver
@@ -174,8 +172,6 @@
 
 	/* driver model interfaces that don't relate to enumeration  */
 	void (*shutdown)(struct i2c_client *);
-	int (*suspend)(struct i2c_client *, pm_message_t mesg);
-	int (*resume)(struct i2c_client *);
 
 	/* Alert callback, for example for the SMBus alert protocol.
 	 * The format and meaning of the data value depends on the protocol.
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index d9b05b5..2e88580 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -52,11 +52,17 @@
  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
- * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
+ * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
+ *                   that this interrupt will wake the system from a suspended
+ *                   state.  See Documentation/power/suspend-and-interrupts.txt
  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  * IRQF_NO_THREAD - Interrupt cannot be threaded
  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
  *                resume time.
+ * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
+ *                interrupt handler after suspending interrupts. For system
+ *                wakeup devices, users need to implement wakeup detection in
+ *                their interrupt handlers.
  */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SHARED		0x00000080
@@ -70,6 +76,7 @@
 #define IRQF_FORCE_RESUME	0x00008000
 #define IRQF_NO_THREAD		0x00010000
 #define IRQF_EARLY_RESUME	0x00020000
+#define IRQF_COND_SUSPEND	0x00040000
 
 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
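
IRQF_COND_SUSPEND is for drivers that share a line with an IRQF_NO_SUSPEND user: the handler may be invoked while the system is suspending, so it must be safe to run then and must do its own wakeup detection. A hedged sketch; struct my_dev, my_dev_irq_pending() and the "my-dev" name are invented.

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *md = dev_id;

	if (!my_dev_irq_pending(md))
		return IRQ_NONE;	/* shared line, not our interrupt */

	/* ... normal handling, safe even during suspend ... */
	return IRQ_HANDLED;
}

static int my_request(struct my_dev *md, int irq)
{
	return request_irq(irq, my_irq_handler,
			   IRQF_SHARED | IRQF_COND_SUSPEND, "my-dev", md);
}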
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 800544b..781974a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -166,6 +166,11 @@
 
 #define GITS_TRANSLATER			0x10040
 
+#define GITS_CTLR_ENABLE		(1U << 0)
+#define GITS_CTLR_QUIESCENT		(1U << 31)
+
+#define GITS_TYPER_DEVBITS_SHIFT	13
+#define GITS_TYPER_DEVBITS(r)		((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
 #define GITS_TYPER_PTA			(1UL << 19)
 
 #define GITS_CBASER_VALID		(1UL << 63)
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 420f77b..e6a6aac 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -243,7 +243,6 @@
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
-extern unsigned int gic_get_timer_pending(void);
 extern int gic_get_c0_compare_int(void);
 extern int gic_get_c0_perfcount_int(void);
 #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index faf433a..dd1109f 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -78,6 +78,7 @@
 #ifdef CONFIG_PM_SLEEP
 	unsigned int		nr_actions;
 	unsigned int		no_suspend_depth;
+	unsigned int		cond_suspend_depth;
 	unsigned int		force_resume_depth;
 #endif
 #ifdef CONFIG_PROC_FS
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 72ba725..5bb0744 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -5,6 +5,7 @@
 
 struct kmem_cache;
 struct page;
+struct vm_struct;
 
 #ifdef CONFIG_KASAN
 
@@ -49,15 +50,11 @@
 void kasan_slab_alloc(struct kmem_cache *s, void *object);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
-#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
-
 int kasan_module_alloc(void *addr, size_t size);
-void kasan_module_free(void *addr);
+void kasan_free_shadow(const struct vm_struct *vm);
 
 #else /* CONFIG_KASAN */
 
-#define MODULE_ALIGN 1
-
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
 static inline void kasan_enable_current(void) {}
@@ -82,7 +79,7 @@
 static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_module_free(void *addr) {}
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
 #endif /* CONFIG_KASAN */
 
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 75ae2e2..a19bcf9 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -156,8 +156,14 @@
 	KDB_REASON_SYSTEM_NMI,	/* In NMI due to SYSTEM cmd; regs valid */
 } kdb_reason_t;
 
+enum kdb_msgsrc {
+	KDB_MSGSRC_INTERNAL, /* direct call to kdb_printf() */
+	KDB_MSGSRC_PRINTK, /* trapped from printk() */
+};
+
 extern int kdb_trap_printk;
-extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
+extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
+				      va_list args);
 extern __printf(1, 2) int kdb_printf(const char *, ...);
 typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
 
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 2bbc62a..551f854 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -427,7 +427,7 @@
 
 enum mlx4_update_qp_attr {
 	MLX4_UPDATE_QP_SMAC		= 1 << 0,
-	MLX4_UPDATE_QP_VSD		= 1 << 2,
+	MLX4_UPDATE_QP_VSD		= 1 << 1,
 	MLX4_UPDATE_QP_SUPPORTED_ATTRS	= (1 << 2) - 1
 };
 
diff --git a/include/linux/module.h b/include/linux/module.h
index 42999fe..b03485b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -344,6 +344,10 @@
 	unsigned long *ftrace_callsites;
 #endif
 
+#ifdef CONFIG_LIVEPATCH
+	bool klp_alive;
+#endif
+
 #ifdef CONFIG_MODULE_UNLOAD
 	/* What modules depend on me? */
 	struct list_head source_list;
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index f755626..4d0cb9b 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -84,4 +84,12 @@
 
 /* Any cleanup before freeing mod->module_init */
 void module_arch_freeing_init(struct module *mod);
+
+#ifdef CONFIG_KASAN
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
 #endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5897b4e..dcf6ec2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -965,9 +965,12 @@
  *	Used to add FDB entries to dump requests. Implementers should add
  *	entries to skb and update idx with the number of entries.
  *
- * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
+ * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
+ *			     u16 flags)
  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
  *			     struct net_device *dev, u32 filter_mask)
+ * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
+ *			     u16 flags);
  *
  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
  *	Called to change device carrier. Soft-devices (like dummy, team, etc)
@@ -2342,6 +2345,7 @@
 
 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
 {
+	grc->offset = 0;
 	grc->delta = 0;
 }
 
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 6d627b9..b01ccf3 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -180,7 +180,6 @@
         /* NFSv4 state */
 	struct list_head	open_states;
 	struct nfs_delegation __rcu *delegation;
-	fmode_t			 delegation_state;
 	struct rw_semaphore	rwsem;
 
 	/* pNFS layout information */
@@ -344,6 +343,7 @@
 extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
 extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
 extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
+extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
 extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
 extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
@@ -356,8 +356,9 @@
 extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
 extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
+extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping);
 extern int nfs_setattr(struct dentry *, struct iattr *);
-extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
+extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
 extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
 				struct nfs4_label *label);
 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
@@ -370,6 +371,7 @@
 extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
 extern u64 nfs_compat_user_ino64(u64 fileid);
 extern void nfs_fattr_init(struct nfs_fattr *fattr);
+extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr);
 extern unsigned long nfs_inc_attr_generation_counter(void);
 
 extern struct nfs_fattr *nfs_alloc_fattr(void);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 38d96ba..4cb3eaa 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1167,8 +1167,15 @@
 	struct nfstime4			date;
 };
 
+struct nfs41_bind_conn_to_session_args {
+	struct nfs_client		*client;
+	struct nfs4_sessionid		sessionid;
+	u32				dir;
+	bool				use_conn_in_rdma_mode;
+};
+
 struct nfs41_bind_conn_to_session_res {
-	struct nfs4_session		*session;
+	struct nfs4_sessionid		sessionid;
 	u32				dir;
 	bool				use_conn_in_rdma_mode;
 };
@@ -1185,6 +1192,8 @@
 
 struct nfs41_create_session_args {
 	struct nfs_client	       *client;
+	u64				clientid;
+	uint32_t			seqid;
 	uint32_t			flags;
 	uint32_t			cb_program;
 	struct nfs4_channel_attrs	fc_attrs;	/* Fore Channel */
@@ -1192,7 +1201,11 @@
 };
 
 struct nfs41_create_session_res {
-	struct nfs_client	       *client;
+	struct nfs4_sessionid		sessionid;
+	uint32_t			seqid;
+	uint32_t			flags;
+	struct nfs4_channel_attrs	fc_attrs;	/* Fore Channel */
+	struct nfs4_channel_attrs	bc_attrs;	/* Back Channel */
 };
 
 struct nfs41_reclaim_complete_args {
@@ -1351,7 +1364,7 @@
 };
 
 struct nfs_commit_info {
-	spinlock_t			*lock;
+	spinlock_t			*lock;	/* inode->i_lock */
 	struct nfs_mds_commit_info	*mds;
 	struct pnfs_ds_commit_info	*ds;
 	struct nfs_direct_req		*dreq;	/* O_DIRECT request */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 19a5d4b..0adad4a 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -17,7 +17,6 @@
 
 #include <uapi/linux/nvme.h>
 #include <linux/pci.h>
-#include <linux/miscdevice.h>
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
 
@@ -62,8 +61,6 @@
 	NVME_CSTS_SHST_MASK	= 3 << 2,
 };
 
-#define NVME_VS(major, minor)	(major << 16 | minor)
-
 extern unsigned char nvme_io_timeout;
 #define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)
 
@@ -91,9 +88,10 @@
 	struct nvme_bar __iomem *bar;
 	struct list_head namespaces;
 	struct kref kref;
-	struct miscdevice miscdev;
+	struct device *device;
 	work_func_t reset_workfn;
 	struct work_struct reset_work;
+	struct work_struct probe_work;
 	char name[12];
 	char serial[20];
 	char model[40];
@@ -105,7 +103,6 @@
 	u16 abort_limit;
 	u8 event_limit;
 	u8 vwc;
-	u8 initialized;
 };
 
 /*
@@ -121,6 +118,7 @@
 	unsigned ns_id;
 	int lba_shift;
 	int ms;
+	int pi_type;
 	u64 mode_select_num_blocks;
 	u32 mode_select_block_len;
 };
@@ -138,6 +136,7 @@
 	int nents;		/* Used in scatterlist */
 	int length;		/* Of data, in bytes */
 	dma_addr_t first_dma;
+	struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
 	struct scatterlist sg[0];
 };
 
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 8a860f0..611a6911 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -84,7 +84,7 @@
 static inline void of_platform_depopulate(struct device *parent) { }
 #endif
 
-#ifdef CONFIG_OF_DYNAMIC
+#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
 extern void of_platform_register_reconfig_notifier(void);
 #else
 static inline void of_platform_register_reconfig_notifier(void) { }
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 72c0415d..18eccef 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -82,7 +82,7 @@
 
 static inline struct pinctrl * __must_check pinctrl_get(struct device *dev)
 {
-	return ERR_PTR(-ENOSYS);
+	return NULL;
 }
 
 static inline void pinctrl_put(struct pinctrl *p)
@@ -93,7 +93,7 @@
 							struct pinctrl *p,
 							const char *name)
 {
-	return ERR_PTR(-ENOSYS);
+	return NULL;
 }
 
 static inline int pinctrl_select_state(struct pinctrl *p,
@@ -104,7 +104,7 @@
 
 static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev)
 {
-	return ERR_PTR(-ENOSYS);
+	return NULL;
 }
 
 static inline void devm_pinctrl_put(struct pinctrl *p)
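
Returning NULL instead of ERR_PTR(-ENOSYS) from the !CONFIG_PINCTRL stubs lets consumers treat pinctrl as optional with a single code path. A hedged example; my_setup_pins() is illustrative only.

static int my_setup_pins(struct device *dev)
{
	struct pinctrl *p = devm_pinctrl_get(dev);
	struct pinctrl_state *s;

	if (IS_ERR(p))
		return PTR_ERR(p);	/* a real error, not "no pinctrl" */
	if (!p)
		return 0;		/* pinctrl compiled out: nothing to do */

	s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
	if (IS_ERR_OR_NULL(s))
		return 0;

	return pinctrl_select_state(p, s);
}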
diff --git a/arch/blackfin/include/asm/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h
similarity index 98%
rename from arch/blackfin/include/asm/bfin_rotary.h
rename to include/linux/platform_data/bfin_rotary.h
index 8895a75..9882937 100644
--- a/arch/blackfin/include/asm/bfin_rotary.h
+++ b/include/linux/platform_data/bfin_rotary.h
@@ -40,6 +40,7 @@
 	unsigned short debounce;	/* 0..17 */
 	unsigned short mode;
 	unsigned short pm_wakeup;
+	unsigned short *pin_list;
 };
 
 /* CNT_CONFIG bitmasks */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5885127..d438eeb 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -54,10 +54,11 @@
  * @buckets: size * hash buckets
  */
 struct bucket_table {
-	size_t				size;
-	unsigned int			locks_mask;
-	spinlock_t			*locks;
-	struct rhash_head __rcu		*buckets[];
+	size_t			size;
+	unsigned int		locks_mask;
+	spinlock_t		*locks;
+
+	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
 };
 
 typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
@@ -78,12 +79,6 @@
  * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
  * @hashfn: Function to hash key
  * @obj_hashfn: Function to hash object
- * @grow_decision: If defined, may return true if table should expand
- * @shrink_decision: If defined, may return true if table should shrink
- *
- * Note: when implementing the grow and shrink decision function, min/max
- * shift must be enforced, otherwise, resizing watermarks they set may be
- * useless.
  */
 struct rhashtable_params {
 	size_t			nelem_hint;
@@ -97,10 +92,6 @@
 	size_t			locks_mul;
 	rht_hashfn_t		hashfn;
 	rht_obj_hashfn_t	obj_hashfn;
-	bool			(*grow_decision)(const struct rhashtable *ht,
-						 size_t new_size);
-	bool			(*shrink_decision)(const struct rhashtable *ht,
-						   size_t new_size);
 };
 
 /**
@@ -192,9 +183,6 @@
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
 
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
-
 int rhashtable_expand(struct rhashtable *ht);
 int rhashtable_shrink(struct rhashtable *ht);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 41c60e5..6d77432 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -363,9 +363,6 @@
  */
 extern void show_stack(struct task_struct *task, unsigned long *sp);
 
-void io_schedule(void);
-long io_schedule_timeout(long timeout);
-
 extern void cpu_init (void);
 extern void trap_init(void);
 extern void update_process_times(int user);
@@ -422,6 +419,13 @@
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 
+extern long io_schedule_timeout(long timeout);
+
+static inline void io_schedule(void)
+{
+	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+}
+
 struct nsproxy;
 struct user_namespace;
 
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index baf3e1d..d10965f 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -143,13 +143,13 @@
 	unsigned char		iotype;			/* io access style */
 	unsigned char		unused1;
 
-#define UPIO_PORT		(0)			/* 8b I/O port access */
-#define UPIO_HUB6		(1)			/* Hub6 ISA card */
-#define UPIO_MEM		(2)			/* 8b MMIO access */
-#define UPIO_MEM32		(3)			/* 32b little endian */
-#define UPIO_MEM32BE		(4)			/* 32b big endian */
-#define UPIO_AU			(5)			/* Au1x00 and RT288x type IO */
-#define UPIO_TSI		(6)			/* Tsi108/109 type IO */
+#define UPIO_PORT		(SERIAL_IO_PORT)	/* 8b I/O port access */
+#define UPIO_HUB6		(SERIAL_IO_HUB6)	/* Hub6 ISA card */
+#define UPIO_MEM		(SERIAL_IO_MEM)		/* 8b MMIO access */
+#define UPIO_MEM32		(SERIAL_IO_MEM32)	/* 32b little endian */
+#define UPIO_AU			(SERIAL_IO_AU)		/* Au1x00 and RT288x type IO */
+#define UPIO_TSI		(SERIAL_IO_TSI)		/* Tsi108/109 type IO */
+#define UPIO_MEM32BE		(SERIAL_IO_MEM32BE)	/* 32b big endian */
 
 	unsigned int		read_status_mask;	/* driver specific */
 	unsigned int		ignore_status_mask;	/* driver specific */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 30007af..f54d665 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -948,6 +948,13 @@
 	to->l4_hash = from->l4_hash;
 };
 
+static inline void skb_sender_cpu_clear(struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+	skb->sender_cpu = 0;
+#endif
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index ed9489d..856d34d 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -649,7 +649,7 @@
  * sequence completes.  On some systems, many such sequences can execute as
  * as single programmed DMA transfer.  On all systems, these messages are
  * queued, and might complete after transactions to other devices.  Messages
- * sent to a given spi_device are alway executed in FIFO order.
+ * sent to a given spi_device are always executed in FIFO order.
  *
  * The code that submits an spi_message (and its spi_transfers)
  * to the lower layers is responsible for managing its memory.
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index 7e61a17..694eecb 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -89,8 +89,11 @@
 static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; }
 static inline void rpc_count_iostats(const struct rpc_task *task,
 				     struct rpc_iostats *stats) {}
-static inline void rpc_count_iostats_metrics(const struct rpc_task *,
-					     struct rpc_iostats *) {}
+static inline void rpc_count_iostats_metrics(const struct rpc_task *task,
+					     struct rpc_iostats *stats)
+{
+}
+
 static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
 static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
 
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index fc52e30..5eac316 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -314,6 +314,8 @@
 }
 
 #endif
+
+#if IS_ENABLED(CONFIG_THERMAL)
 struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
 		void *, struct thermal_zone_device_ops *,
 		const struct thermal_zone_params *, int, int);
@@ -340,8 +342,58 @@
 		struct thermal_cooling_device *, int);
 void thermal_cdev_update(struct thermal_cooling_device *);
 void thermal_notify_framework(struct thermal_zone_device *, int);
+#else
+static inline struct thermal_zone_device *thermal_zone_device_register(
+	const char *type, int trips, int mask, void *devdata,
+	struct thermal_zone_device_ops *ops,
+	const struct thermal_zone_params *tzp,
+	int passive_delay, int polling_delay)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_zone_device_unregister(
+	struct thermal_zone_device *tz)
+{ }
+static inline int thermal_zone_bind_cooling_device(
+	struct thermal_zone_device *tz, int trip,
+	struct thermal_cooling_device *cdev,
+	unsigned long upper, unsigned long lower)
+{ return -ENODEV; }
+static inline int thermal_zone_unbind_cooling_device(
+	struct thermal_zone_device *tz, int trip,
+	struct thermal_cooling_device *cdev)
+{ return -ENODEV; }
+static inline void thermal_zone_device_update(struct thermal_zone_device *tz)
+{ }
+static inline struct thermal_cooling_device *
+thermal_cooling_device_register(char *type, void *devdata,
+	const struct thermal_cooling_device_ops *ops)
+{ return ERR_PTR(-ENODEV); }
+static inline struct thermal_cooling_device *
+thermal_of_cooling_device_register(struct device_node *np,
+	char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_cooling_device_unregister(
+	struct thermal_cooling_device *cdev)
+{ }
+static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
+		const char *name)
+{ return ERR_PTR(-ENODEV); }
+static inline int thermal_zone_get_temp(
+		struct thermal_zone_device *tz, unsigned long *temp)
+{ return -ENODEV; }
+static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
+{ return -ENODEV; }
+static inline struct thermal_instance *
+get_thermal_instance(struct thermal_zone_device *tz,
+	struct thermal_cooling_device *cdev, int trip)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
+{ }
+static inline void thermal_notify_framework(struct thermal_zone_device *tz,
+	int trip)
+{ }
+#endif /* CONFIG_THERMAL */
 
-#ifdef CONFIG_NET
+#if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL)
 extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
 						enum events event);
 #else
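
The thermal.h change above follows the usual kernel pattern of pairing the
real declarations with static inline stubs, so callers build unchanged
whether or not the subsystem is enabled. A minimal sketch of that idiom,
using an invented CONFIG_FOO / foo_*() API rather than the thermal functions
themselves:

	/* foo.h -- illustrative only; CONFIG_FOO and the foo_*() API are made up */
	#include <linux/err.h>

	struct foo_device;

	#if IS_ENABLED(CONFIG_FOO)
	struct foo_device *foo_register(const char *name);
	void foo_unregister(struct foo_device *dev);
	#else
	/* Stubs: callers still compile and simply see -ENODEV at run time. */
	static inline struct foo_device *foo_register(const char *name)
	{ return ERR_PTR(-ENODEV); }
	static inline void foo_unregister(struct foo_device *dev)
	{ }
	#endif

IS_ENABLED() is true for both =y and =m builds, which is why the patch uses
it rather than a plain #ifdef CONFIG_THERMAL.
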
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 07a0226..7188029 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -98,6 +98,8 @@
 			size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
 
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
+
 static inline size_t iov_iter_count(struct iov_iter *i)
 {
 	return i->count;
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 9bb547c..704a1ab 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -190,8 +190,7 @@
  * @num_ports: the number of different ports this device will have.
  * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
  *	(0 = end-point size)
- * @bulk_out_size: minimum number of bytes to allocate for bulk-out buffer
- *	(0 = end-point size)
+ * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
  * @calc_num_ports: pointer to a function to determine how many ports this
  *	device has dynamically.  It will be called after the probe()
  *	callback is called, but before attach()
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index d320411..2d67b89 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -26,6 +26,7 @@
  * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
  *         operations documented below
  * @mmap: Perform mmap(2) on a region of the device file descriptor
+ * @request: Request for the bus driver to release the device
  */
 struct vfio_device_ops {
 	char	*name;
@@ -38,6 +39,7 @@
 	long	(*ioctl)(void *device_data, unsigned int cmd,
 			 unsigned long arg);
 	int	(*mmap)(void *device_data, struct vm_area_struct *vma);
+	void	(*request)(void *device_data, unsigned int count);
 };
 
 extern int vfio_add_group_dev(struct device *dev,
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 7d7acb3..0ec5983 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -17,6 +17,7 @@
 #define VM_VPAGES		0x00000010	/* buffer for pages was vmalloc'ed */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
 #define VM_NO_GUARD		0x00000040      /* don't add guard page */
+#define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 74db135..f597846 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -70,7 +70,8 @@
 	/* data contains off-queue information when !WORK_STRUCT_PWQ */
 	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,
 
-	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),
+	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
+	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),
 
 	/*
 	 * When a work item is off queue, its high bits point to the last
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index 1c1ad46..fe328c5 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -171,7 +171,7 @@
  * @return    Checksum of buffer.
  */
 
-u16 cfpkt_iterate(struct cfpkt *pkt,
+int cfpkt_iterate(struct cfpkt *pkt,
 		u16 (*iter_func)(u16 chks, void *buf, u16 len),
 		u16 data);
 
diff --git a/include/net/dst.h b/include/net/dst.h
index a8ae4e7..0fb99a2 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -481,6 +481,7 @@
 enum {
 	XFRM_LOOKUP_ICMP = 1 << 0,
 	XFRM_LOOKUP_QUEUE = 1 << 1,
+	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
 };
 
 struct flowi;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 9eaaa78..decb9a0 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -119,6 +119,22 @@
 			   const struct nft_data *data,
 			   enum nft_data_types type);
 
+
+/**
+ *	struct nft_userdata - user defined data associated with an object
+ *
+ *	@len: length of the data
+ *	@data: content
+ *
+ *	The presence of user data is indicated in an object-specific fashion,
+ *	so a length of zero can't occur and the value "len" indicates data
+ *	of length len + 1.
+ */
+struct nft_userdata {
+	u8			len;
+	unsigned char		data[0];
+};
+
 /**
  *	struct nft_set_elem - generic representation of set elements
  *
@@ -380,7 +396,7 @@
  *	@handle: rule handle
  *	@genmask: generation mask
  *	@dlen: length of expression data
- *	@ulen: length of user data (used for comments)
+ *	@udata: user data is appended to the rule
  *	@data: expression data
  */
 struct nft_rule {
@@ -388,7 +404,7 @@
 	u64				handle:42,
 					genmask:2,
 					dlen:12,
-					ulen:8;
+					udata:1;
 	unsigned char			data[]
 		__attribute__((aligned(__alignof__(struct nft_expr))));
 };
@@ -476,7 +492,7 @@
 	return (struct nft_expr *)&rule->data[rule->dlen];
 }
 
-static inline void *nft_userdata(const struct nft_rule *rule)
+static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
 {
 	return (void *)&rule->data[rule->dlen];
 }
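
Since attached user data is never empty, the stored length is biased by one:
a stored len of 0 means one byte of data, so the full 1..256 byte range fits
in the u8 field. A small stand-alone sketch of that convention (the helper
functions are invented, not part of the patch):

	#include <string.h>

	struct example_userdata {
		unsigned char	len;	/* real length is len + 1 */
		unsigned char	data[];
	};

	/* Caller guarantees 1 <= n <= 256. */
	static void udata_store(struct example_userdata *u, const void *src,
				unsigned int n)
	{
		u->len = n - 1;
		memcpy(u->data, src, n);
	}

	static unsigned int udata_len(const struct example_userdata *u)
	{
		return u->len + 1;
	}
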
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index eabd3a0..c73e7ab 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -91,6 +91,7 @@
 
 #define VXLAN_N_VID     (1u << 24)
 #define VXLAN_VID_MASK  (VXLAN_N_VID - 1)
+#define VXLAN_VNI_MASK  (VXLAN_VID_MASK << 8)
 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
 struct vxlan_metadata {
diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h
index 0210797..dc10c52 100644
--- a/include/soc/at91/at91sam9_ddrsdr.h
+++ b/include/soc/at91/at91sam9_ddrsdr.h
@@ -92,7 +92,7 @@
 #define		AT91_DDRSDRC_UPD_MR	(3 << 20)	 /* Update load mode register and extended mode register */
 
 #define AT91_DDRSDRC_MDR	0x20	/* Memory Device Register */
-#define		AT91_DDRSDRC_MD		(3 << 0)		/* Memory Device Type */
+#define		AT91_DDRSDRC_MD		(7 << 0)	/* Memory Device Type */
 #define			AT91_DDRSDRC_MD_SDR		0
 #define			AT91_DDRSDRC_MD_LOW_POWER_SDR	1
 #define			AT91_DDRSDRC_MD_LOW_POWER_DDR	3
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
similarity index 98%
rename from drivers/target/iscsi/iscsi_target_core.h
rename to include/target/iscsi/iscsi_target_core.h
index cbcff38..d3583d3 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -880,4 +880,18 @@
 	struct iscsi_portal_group	*discovery_tpg;
 };
 
+static inline u32 session_get_next_ttt(struct iscsi_session *session)
+{
+	u32 ttt;
+
+	spin_lock_bh(&session->ttt_lock);
+	ttt = session->targ_xfer_tag++;
+	if (ttt == 0xFFFFFFFF)
+		ttt = session->targ_xfer_tag++;
+	spin_unlock_bh(&session->ttt_lock);
+
+	return ttt;
+}
+
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 #endif /* ISCSI_TARGET_CORE_H */
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
similarity index 100%
rename from drivers/target/iscsi/iscsi_target_stat.h
rename to include/target/iscsi/iscsi_target_stat.h
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index daef9da..e6bb166 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 #include <linux/list.h>
-#include "../../../drivers/target/iscsi/iscsi_target_core.h"
+#include "iscsi_target_core.h"
 
 struct iscsit_transport {
 #define ISCSIT_TRANSPORT_NAME	16
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 4a8795a..672150b6 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -407,7 +407,7 @@
 	/* Activate Persistence across Target Power Loss enabled
 	 * for SCSI device */
 	int pr_aptpl_active;
-#define PR_APTPL_BUF_LEN			8192
+#define PR_APTPL_BUF_LEN			262144
 	u32 pr_generation;
 	spinlock_t registration_lock;
 	spinlock_t aptpl_reg_lock;
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 26386cf..aef9a81 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -115,7 +115,13 @@
 	__le16			nawun;
 	__le16			nawupf;
 	__le16			nacwu;
-	__u8			rsvd40[80];
+	__le16			nabsn;
+	__le16			nabo;
+	__le16			nabspf;
+	__u16			rsvd46;
+	__le64			nvmcap[2];
+	__u8			rsvd64[40];
+	__u8			nguid[16];
 	__u8			eui64[8];
 	struct nvme_lbaf	lbaf[16];
 	__u8			rsvd192[192];
@@ -124,10 +130,22 @@
 
 enum {
 	NVME_NS_FEAT_THIN	= 1 << 0,
+	NVME_NS_FLBAS_LBA_MASK	= 0xf,
+	NVME_NS_FLBAS_META_EXT	= 0x10,
 	NVME_LBAF_RP_BEST	= 0,
 	NVME_LBAF_RP_BETTER	= 1,
 	NVME_LBAF_RP_GOOD	= 2,
 	NVME_LBAF_RP_DEGRADED	= 3,
+	NVME_NS_DPC_PI_LAST	= 1 << 4,
+	NVME_NS_DPC_PI_FIRST	= 1 << 3,
+	NVME_NS_DPC_PI_TYPE3	= 1 << 2,
+	NVME_NS_DPC_PI_TYPE2	= 1 << 1,
+	NVME_NS_DPC_PI_TYPE1	= 1 << 0,
+	NVME_NS_DPS_PI_FIRST	= 1 << 3,
+	NVME_NS_DPS_PI_MASK	= 0x7,
+	NVME_NS_DPS_PI_TYPE1	= 1,
+	NVME_NS_DPS_PI_TYPE2	= 2,
+	NVME_NS_DPS_PI_TYPE3	= 3,
 };
 
 struct nvme_smart_log {
@@ -261,6 +279,10 @@
 	NVME_RW_DSM_LATENCY_LOW		= 3 << 4,
 	NVME_RW_DSM_SEQ_REQ		= 1 << 6,
 	NVME_RW_DSM_COMPRESSED		= 1 << 7,
+	NVME_RW_PRINFO_PRCHK_REF	= 1 << 10,
+	NVME_RW_PRINFO_PRCHK_APP	= 1 << 11,
+	NVME_RW_PRINFO_PRCHK_GUARD	= 1 << 12,
+	NVME_RW_PRINFO_PRACT		= 1 << 13,
 };
 
 struct nvme_dsm_cmd {
@@ -549,6 +571,8 @@
 	__u32	result;
 };
 
+#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+
 #define nvme_admin_cmd nvme_passthru_cmd
 
 #define NVME_IOCTL_ID		_IO('N', 0x40)
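
The new NVME_VS() macro packs a spec version with the major number in the
high 16 bits and the minor number in bits 15:8, matching how the controller's
version register lays out its fields. A quick stand-alone check of the
arithmetic:

	#include <assert.h>

	#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))

	int main(void)
	{
		assert(NVME_VS(1, 0) == 0x00010000);	/* NVMe 1.0 */
		assert(NVME_VS(1, 1) == 0x00010100);	/* NVMe 1.1 */
		return 0;
	}
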
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 89f6350..31891d9 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -185,4 +185,9 @@
 #define PR_MPX_ENABLE_MANAGEMENT  43
 #define PR_MPX_DISABLE_MANAGEMENT 44
 
+#define PR_SET_FP_MODE		45
+#define PR_GET_FP_MODE		46
+# define PR_FP_MODE_FR		(1 << 0)	/* 64b FP registers */
+# define PR_FP_MODE_FRE		(1 << 1)	/* 32b compatibility */
+
 #endif /* _LINUX_PRCTL_H */
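
PR_SET_FP_MODE / PR_GET_FP_MODE expose the FR/FRE floating-point register
mode to user space; the generic kernel falls back to -EINVAL (see the
kernel/sys.c hunk later in this diff), so only architectures that provide
GET_FP_MODE/SET_FP_MODE hooks honour it. A hedged user-space sketch:

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_GET_FP_MODE
	#define PR_SET_FP_MODE	45
	#define PR_GET_FP_MODE	46
	#define PR_FP_MODE_FR	(1 << 0)	/* 64b FP registers */
	#define PR_FP_MODE_FRE	(1 << 1)	/* 32b compatibility */
	#endif

	int main(void)
	{
		int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

		if (mode < 0)
			perror("PR_GET_FP_MODE (expected on arches without the hook)");
		else
			printf("FP mode flags: %#x\n", mode);
		return 0;
	}
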
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index 5e0d0ed..25331f9 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -65,6 +65,10 @@
 #define SERIAL_IO_PORT	0
 #define SERIAL_IO_HUB6	1
 #define SERIAL_IO_MEM	2
+#define SERIAL_IO_MEM32	  3
+#define SERIAL_IO_AU	  4
+#define SERIAL_IO_TSI	  5
+#define SERIAL_IO_MEM32BE 6
 
 #define UART_CLEAR_FIFO		0x01
 #define UART_USE_FIFO		0x02
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 19d5219..242cf0c 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -9,3 +9,4 @@
 header-y += tc_skbedit.h
 header-y += tc_vlan.h
 header-y += tc_bpf.h
+header-y += tc_connmark.h
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 29715d2..82889c3 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -333,6 +333,7 @@
 	VFIO_PCI_MSI_IRQ_INDEX,
 	VFIO_PCI_MSIX_IRQ_INDEX,
 	VFIO_PCI_ERR_IRQ_INDEX,
+	VFIO_PCI_REQ_IRQ_INDEX,
 	VFIO_PCI_NUM_IRQS
 };
 
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 3c53eec..19c66fc 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -60,7 +60,7 @@
 	__u32 size_max;
 	/* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
 	__u32 seg_max;
-	/* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */
+	/* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
 	struct virtio_blk_geometry {
 		__u16 cylinders;
 		__u8 heads;
@@ -119,7 +119,11 @@
 #define VIRTIO_BLK_T_BARRIER	0x80000000
 #endif /* !VIRTIO_BLK_NO_LEGACY */
 
-/* This is the first element of the read scatter-gather list. */
+/*
+ * This comes first in the read scatter-gather list.
+ * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
+ * this is the first element of the read scatter-gather list.
+ */
 struct virtio_blk_outhdr {
 	/* VIRTIO_BLK_T* */
 	__virtio32 type;
diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h
index 42b9370..cc18ef8 100644
--- a/include/uapi/linux/virtio_scsi.h
+++ b/include/uapi/linux/virtio_scsi.h
@@ -29,8 +29,16 @@
 
 #include <linux/virtio_types.h>
 
-#define VIRTIO_SCSI_CDB_SIZE   32
-#define VIRTIO_SCSI_SENSE_SIZE 96
+/* Default values of the CDB and sense data size configuration fields */
+#define VIRTIO_SCSI_CDB_DEFAULT_SIZE   32
+#define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96
+
+#ifndef VIRTIO_SCSI_CDB_SIZE
+#define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE
+#endif
+#ifndef VIRTIO_SCSI_SENSE_SIZE
+#define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE
+#endif
 
 /* SCSI command request, followed by data-out */
 struct virtio_scsi_cmd_req {
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 867cc50..b513e66 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -90,6 +90,7 @@
 };
 
 enum {
+	IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
 	IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
 	IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 };
@@ -201,6 +202,28 @@
 	__u8  reserved[4];
 };
 
+struct ib_uverbs_ex_query_device {
+	__u32 comp_mask;
+	__u32 reserved;
+};
+
+struct ib_uverbs_odp_caps {
+	__u64 general_caps;
+	struct {
+		__u32 rc_odp_caps;
+		__u32 uc_odp_caps;
+		__u32 ud_odp_caps;
+	} per_transport_caps;
+	__u32 reserved;
+};
+
+struct ib_uverbs_ex_query_device_resp {
+	struct ib_uverbs_query_device_resp base;
+	__u32 comp_mask;
+	__u32 response_length;
+	struct ib_uverbs_odp_caps odp_caps;
+};
+
 struct ib_uverbs_query_port {
 	__u64 response;
 	__u8  port_num;
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 60de61f..c8ed15d 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -689,6 +689,7 @@
 };
 
 struct omap_dss_device {
+	struct kobject kobj;
 	struct device *dev;
 
 	struct module *owner;
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 7491ee5..8333821 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -46,4 +46,30 @@
 }
 #endif
 
+#ifdef CONFIG_PREEMPT
+
+static inline void xen_preemptible_hcall_begin(void)
+{
+}
+
+static inline void xen_preemptible_hcall_end(void)
+{
+}
+
+#else
+
+DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
+
+static inline void xen_preemptible_hcall_begin(void)
+{
+	__this_cpu_write(xen_in_preemptible_hcall, true);
+}
+
+static inline void xen_preemptible_hcall_end(void)
+{
+	__this_cpu_write(xen_in_preemptible_hcall, false);
+}
+
+#endif /* CONFIG_PREEMPT */
+
 #endif /* INCLUDE_XEN_OPS_H */
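
The begin/end helpers just bracket a section with a per-CPU flag; on fully
preemptible kernels they compile to nothing, while on !CONFIG_PREEMPT kernels
the flag is checked on the interrupt-return path so a long-running hypercall
can be rescheduled in the middle. An illustrative caller (do_long_hypercall()
is a placeholder, not a real interface):

	#include <xen/xen-ops.h>

	extern long do_long_hypercall(void *arg);	/* placeholder */

	static long issue_long_hypercall(void *arg)
	{
		long ret;

		xen_preemptible_hcall_begin();	/* safe to preempt from here ... */
		ret = do_long_hypercall(arg);
		xen_preemptible_hcall_end();	/* ... up to here */

		return ret;
	}
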
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b78f21c..b0f1c9e 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -114,9 +114,9 @@
 					   const char *mod_name);
 
 #define xenbus_register_frontend(drv) \
-	__xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
+	__xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME)
 #define xenbus_register_backend(drv) \
-	__xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
+	__xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME)
 
 void xenbus_unregister_driver(struct xenbus_driver *drv);
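
Dropping the trailing semicolons is more than cosmetic: a macro that already
ends in ';' expands to two statements, which breaks a brace-less if/else at
the call site. A small sketch of the failure mode (do_register() and the
driver pointers are invented):

	static int do_register(void *drv) { return 0; }

	#define REGISTER(drv)	do_register(drv)	/* no trailing ';' */

	static void pick_driver(int use_fancy, void *fancy, void *plain)
	{
		if (use_fancy)
			REGISTER(fancy);	/* with a ';' inside the macro this     */
		else				/* expands to "do_register(fancy);;" -- */
			REGISTER(plain);	/* the extra ';' ends the if, and the   */
	}					/* 'else' no longer parses.             */
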
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1d1fe93..fc7f474 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -548,9 +548,6 @@
 
 	rcu_read_lock();
 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-		if (cp == root_cs)
-			continue;
-
 		/* skip the whole subtree if @cp doesn't have any CPU */
 		if (cpumask_empty(cp->cpus_allowed)) {
 			pos_css = css_rightmost_descendant(pos_css);
@@ -873,7 +870,7 @@
 		 * If it becomes empty, inherit the effective mask of the
 		 * parent, which is guaranteed to have some CPUs.
 		 */
-		if (cpumask_empty(new_cpus))
+		if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
 			cpumask_copy(new_cpus, parent->effective_cpus);
 
 		/* Skip the whole subtree if the cpumask remains the same. */
@@ -1129,7 +1126,7 @@
 		 * If it becomes empty, inherit the effective mask of the
 		 * parent, which is guaranteed to have some MEMs.
 		 */
-		if (nodes_empty(*new_mems))
+		if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
 			*new_mems = parent->effective_mems;
 
 		/* Skip the whole subtree if the nodemask remains the same. */
@@ -1979,7 +1976,9 @@
 
 	spin_lock_irq(&callback_lock);
 	cs->mems_allowed = parent->mems_allowed;
+	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
 	spin_unlock_irq(&callback_lock);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 07ce18c..0874e2e 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -604,7 +604,7 @@
 		   online_cpus)
 		cpu_relax();
 	if (!time_left)
-		pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
+		pr_crit("Timed out waiting for secondary CPUs.\n");
 
 	/*
 	 * At this point the primary processor is completely
@@ -696,6 +696,14 @@
 
 	if (arch_kgdb_ops.enable_nmi)
 		arch_kgdb_ops.enable_nmi(0);
+	/*
+	 * Avoid entering the debugger if we were triggered due to an oops
+	 * but panic_timeout indicates the system should automatically
+	 * reboot on panic. We don't want to get stuck waiting for input
+	 * on such systems, especially if it's "just" an oops.
+	 */
+	if (signo != SIGTRAP && panic_timeout)
+		return 1;
 
 	memset(ks, 0, sizeof(struct kgdb_state));
 	ks->cpu			= raw_smp_processor_id();
@@ -828,6 +836,15 @@
 			    unsigned long val,
 			    void *data)
 {
+	/*
+	 * Avoid entering the debugger if we were triggered due to a panic.
+	 * We don't want to get stuck waiting for user input in that case.
+	 * panic_timeout indicates the system should automatically
+	 * reboot on panic.
+	 */
+	if (panic_timeout)
+		return NOTIFY_DONE;
+
 	if (dbg_kdb_mode)
 		kdb_printf("PANIC: %s\n", (char *)data);
 	kgdb_breakpoint();
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 7c70812..fc1ef73 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -439,7 +439,7 @@
  *	substituted for %d, %x or %o in the prompt.
  */
 
-char *kdb_getstr(char *buffer, size_t bufsize, char *prompt)
+char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
 {
 	if (prompt && kdb_prompt_str != prompt)
 		strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
@@ -548,7 +548,7 @@
 	return 0;
 }
 
-int vkdb_printf(const char *fmt, va_list ap)
+int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 {
 	int diag;
 	int linecount;
@@ -680,6 +680,12 @@
 			size_avail = sizeof(kdb_buffer) - len;
 			goto kdb_print_out;
 		}
+		if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
+			/*
+			 * This was an interactive search (using '/' at the
+			 * more prompt) and it has completed. Clear the flag.
+			 */
+			kdb_grepping_flag = 0;
 		/*
 		 * at this point the string is a full line and
 		 * should be printed, up to the null.
@@ -691,19 +697,20 @@
 	 * Write to all consoles.
 	 */
 	retlen = strlen(kdb_buffer);
+	cp = (char *) printk_skip_level(kdb_buffer);
 	if (!dbg_kdb_mode && kgdb_connected) {
-		gdbstub_msg_write(kdb_buffer, retlen);
+		gdbstub_msg_write(cp, retlen - (cp - kdb_buffer));
 	} else {
 		if (dbg_io_ops && !dbg_io_ops->is_console) {
-			len = retlen;
-			cp = kdb_buffer;
+			len = retlen - (cp - kdb_buffer);
+			cp2 = cp;
 			while (len--) {
-				dbg_io_ops->write_char(*cp);
-				cp++;
+				dbg_io_ops->write_char(*cp2);
+				cp2++;
 			}
 		}
 		while (c) {
-			c->write(c, kdb_buffer, retlen);
+			c->write(c, cp, retlen - (cp - kdb_buffer));
 			touch_nmi_watchdog();
 			c = c->next;
 		}
@@ -711,7 +718,10 @@
 	if (logging) {
 		saved_loglevel = console_loglevel;
 		console_loglevel = CONSOLE_LOGLEVEL_SILENT;
-		printk(KERN_INFO "%s", kdb_buffer);
+		if (printk_get_level(kdb_buffer) || src == KDB_MSGSRC_PRINTK)
+			printk("%s", kdb_buffer);
+		else
+			pr_info("%s", kdb_buffer);
 	}
 
 	if (KDB_STATE(PAGER)) {
@@ -794,11 +804,23 @@
 			kdb_nextline = linecount - 1;
 			kdb_printf("\r");
 			suspend_grep = 1; /* for this recursion */
+		} else if (buf1[0] == '/' && !kdb_grepping_flag) {
+			kdb_printf("\r");
+			kdb_getstr(kdb_grep_string, KDB_GREP_STRLEN,
+				   kdbgetenv("SEARCHPROMPT") ?: "search> ");
+			*strchrnul(kdb_grep_string, '\n') = '\0';
+			kdb_grepping_flag += KDB_GREPPING_FLAG_SEARCH;
+			suspend_grep = 1; /* for this recursion */
 		} else if (buf1[0] && buf1[0] != '\n') {
 			/* user hit something other than enter */
 			suspend_grep = 1; /* for this recursion */
-			kdb_printf("\nOnly 'q' or 'Q' are processed at more "
-				   "prompt, input ignored\n");
+			if (buf1[0] != '/')
+				kdb_printf(
+				    "\nOnly 'q', 'Q' or '/' are processed at "
+				    "more prompt, input ignored\n");
+			else
+				kdb_printf("\n'/' cannot be used during | "
+					   "grep filtering, input ignored\n");
 		} else if (kdb_grepping_flag) {
 			/* user hit enter */
 			suspend_grep = 1; /* for this recursion */
@@ -844,7 +866,7 @@
 	int r;
 
 	va_start(ap, fmt);
-	r = vkdb_printf(fmt, ap);
+	r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
 	va_end(ap);
 
 	return r;
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 7b40c5f..4121345 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -50,8 +50,7 @@
 static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
 module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
 
-#define GREP_LEN 256
-char kdb_grep_string[GREP_LEN];
+char kdb_grep_string[KDB_GREP_STRLEN];
 int kdb_grepping_flag;
 EXPORT_SYMBOL(kdb_grepping_flag);
 int kdb_grep_leading;
@@ -870,7 +869,7 @@
 	len = strlen(cp);
 	if (!len)
 		return;
-	if (len >= GREP_LEN) {
+	if (len >= KDB_GREP_STRLEN) {
 		kdb_printf("search string too long\n");
 		return;
 	}
@@ -915,13 +914,12 @@
 	char *cp;
 	char *cpp, quoted;
 	kdbtab_t *tp;
-	int i, escaped, ignore_errors = 0, check_grep;
+	int i, escaped, ignore_errors = 0, check_grep = 0;
 
 	/*
 	 * First tokenize the command string.
 	 */
 	cp = (char *)cmdstr;
-	kdb_grepping_flag = check_grep = 0;
 
 	if (KDB_FLAG(CMD_INTERRUPT)) {
 		/* Previous command was interrupted, newline must not
@@ -1247,7 +1245,6 @@
 		kdb_printf("due to NonMaskable Interrupt @ "
 			   kdb_machreg_fmt "\n",
 			   instruction_pointer(regs));
-		kdb_dumpregs(regs);
 		break;
 	case KDB_REASON_SSTEP:
 	case KDB_REASON_BREAK:
@@ -1281,6 +1278,9 @@
 		 */
 		kdb_nextline = 1;
 		KDB_STATE_CLEAR(SUPPRESS);
+		kdb_grepping_flag = 0;
+		/* ensure the old search does not leak into '/' commands */
+		kdb_grep_string[0] = '\0';
 
 		cmdbuf = cmd_cur;
 		*cmdbuf = '\0';
@@ -2256,7 +2256,7 @@
 	/*
 	 * Validate cpunum
 	 */
-	if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
+	if ((cpunum >= CONFIG_NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
 		return KDB_BADCPUNUM;
 
 	dbg_switch_cpu = cpunum;
@@ -2583,7 +2583,7 @@
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 	kdb_printf("\nMemTotal:       %8lu kB\nMemFree:        %8lu kB\n"
 		   "Buffers:        %8lu kB\n",
-		   val.totalram, val.freeram, val.bufferram);
+		   K(val.totalram), K(val.freeram), K(val.bufferram));
 	return 0;
 }
 
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index eaacd16..75014d7 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -196,7 +196,9 @@
 
 /* Miscellaneous functions and data areas */
 extern int kdb_grepping_flag;
+#define KDB_GREPPING_FLAG_SEARCH 0x8000
 extern char kdb_grep_string[];
+#define KDB_GREP_STRLEN 256
 extern int kdb_grep_leading;
 extern int kdb_grep_trailing;
 extern char *kdb_cmds[];
@@ -209,7 +211,7 @@
 extern void kdb_print_nameval(const char *name, unsigned long val);
 extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
 extern void kdb_meminfo_proc_show(void);
-extern char *kdb_getstr(char *, size_t, char *);
+extern char *kdb_getstr(char *, size_t, const char *);
 extern void kdb_gdb_state_pass(char *buf);
 
 /* Defines for kdb_symbol_print */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f04daab..453ef61 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3591,7 +3591,7 @@
 	ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
 	WARN_ON_ONCE(ctx->parent_ctx);
 	perf_remove_from_context(event, true);
-	mutex_unlock(&ctx->mutex);
+	perf_event_ctx_unlock(event, ctx);
 
 	_free_event(event);
 }
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 196a06f..886d09e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1474,8 +1474,13 @@
 	 * otherwise we'll have trouble later trying to figure out
 	 * which interrupt is which (messes up the interrupt freeing
 	 * logic etc).
+	 *
+	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
+	 * it cannot be set along with IRQF_NO_SUSPEND.
 	 */
-	if ((irqflags & IRQF_SHARED) && !dev_id)
+	if (((irqflags & IRQF_SHARED) && !dev_id) ||
+	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
+	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
 		return -EINVAL;
 
 	desc = irq_to_desc(irq);
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 3ca5325..5204a6d 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -43,9 +43,12 @@
 
 	if (action->flags & IRQF_NO_SUSPEND)
 		desc->no_suspend_depth++;
+	else if (action->flags & IRQF_COND_SUSPEND)
+		desc->cond_suspend_depth++;
 
 	WARN_ON_ONCE(desc->no_suspend_depth &&
-		     desc->no_suspend_depth != desc->nr_actions);
+		     (desc->no_suspend_depth +
+			desc->cond_suspend_depth) != desc->nr_actions);
 }
 
 /*
@@ -61,6 +64,8 @@
 
 	if (action->flags & IRQF_NO_SUSPEND)
 		desc->no_suspend_depth--;
+	else if (action->flags & IRQF_COND_SUSPEND)
+		desc->cond_suspend_depth--;
 }
 
 static bool suspend_device_irq(struct irq_desc *desc, int irq)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index ff7f47d..3f9f1d6 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -89,16 +89,28 @@
 /* sets obj->mod if object is not vmlinux and module is found */
 static void klp_find_object_module(struct klp_object *obj)
 {
+	struct module *mod;
+
 	if (!klp_is_module(obj))
 		return;
 
 	mutex_lock(&module_mutex);
 	/*
-	 * We don't need to take a reference on the module here because we have
-	 * the klp_mutex, which is also taken by the module notifier.  This
-	 * prevents any module from unloading until we release the klp_mutex.
+	 * We do not want to block removal of patched modules and therefore
+	 * we do not take a reference here. The patches are removed by
+	 * a going module handler instead.
 	 */
-	obj->mod = find_module(obj->name);
+	mod = find_module(obj->name);
+	/*
+	 * Do not mess with the work of the module coming and going notifiers.
+	 * Note that the patch might still be needed before the going handler
+	 * is called. Module functions can be called even in the GOING state
+	 * until mod->exit() finishes. This is especially important for
+	 * patches that modify the semantics of the functions.
+	 */
+	if (mod && mod->klp_alive)
+		obj->mod = mod;
+
 	mutex_unlock(&module_mutex);
 }
 
@@ -248,11 +260,12 @@
 	/* first, check if it's an exported symbol */
 	preempt_disable();
 	sym = find_symbol(name, NULL, NULL, true, true);
-	preempt_enable();
 	if (sym) {
 		*addr = sym->value;
+		preempt_enable();
 		return 0;
 	}
+	preempt_enable();
 
 	/* otherwise check if it's in another .o within the patch module */
 	return klp_find_object_symbol(pmod->name, name, addr);
@@ -314,12 +327,12 @@
 	rcu_read_lock();
 	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 				      stack_node);
-	rcu_read_unlock();
-
 	if (WARN_ON_ONCE(!func))
-		return;
+		goto unlock;
 
 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
+unlock:
+	rcu_read_unlock();
 }
 
 static int klp_disable_func(struct klp_func *func)
@@ -731,7 +744,7 @@
 	func->state = KLP_DISABLED;
 
 	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
-				    obj->kobj, func->old_name);
+				    obj->kobj, "%s", func->old_name);
 }
 
 /* parts of the initialization that is done only when the object is loaded */
@@ -766,6 +779,7 @@
 		return -EINVAL;
 
 	obj->state = KLP_DISABLED;
+	obj->mod = NULL;
 
 	klp_find_object_module(obj);
 
@@ -807,7 +821,7 @@
 	patch->state = KLP_DISABLED;
 
 	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
-				   klp_root_kobj, patch->mod->name);
+				   klp_root_kobj, "%s", patch->mod->name);
 	if (ret)
 		goto unlock;
 
@@ -960,6 +974,15 @@
 
 	mutex_lock(&klp_mutex);
 
+	/*
+	 * Each module has to know that the notifier has been called.
+	 * We never know what module will get patched by a new patch.
+	 */
+	if (action == MODULE_STATE_COMING)
+		mod->klp_alive = true;
+	else /* MODULE_STATE_GOING */
+		mod->klp_alive = false;
+
 	list_for_each_entry(patch, &klp_patches, list) {
 		for (obj = patch->objs; obj->funcs; obj++) {
 			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 3059bc2f..6357265 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1193,7 +1193,9 @@
 		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
 	if (unlikely(ret)) {
-		remove_waiter(lock, &waiter);
+		__set_current_state(TASK_RUNNING);
+		if (rt_mutex_has_waiters(lock))
+			remove_waiter(lock, &waiter);
 		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
 	}
 
diff --git a/kernel/module.c b/kernel/module.c
index b34813f..b3d634e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -56,7 +56,6 @@
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
-#include <linux/kasan.h>
 #include <linux/jump_label.h>
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
@@ -1814,7 +1813,6 @@
 void __weak module_memfree(void *module_region)
 {
 	vfree(module_region);
-	kasan_module_free(module_region);
 }
 
 void __weak module_arch_cleanup(struct module *mod)
@@ -2313,11 +2311,13 @@
 	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
 	info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
 	mod->core_size += strtab_size;
+	mod->core_size = debug_align(mod->core_size);
 
 	/* Put string table section at end of init part of module. */
 	strsect->sh_flags |= SHF_ALLOC;
 	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
 					 info->index.str) | INIT_OFFSET_MASK;
+	mod->init_size = debug_align(mod->init_size);
 	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
 }
 
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
index cbd69d8..2ca4a8b 100644
--- a/kernel/printk/console_cmdline.h
+++ b/kernel/printk/console_cmdline.h
@@ -3,7 +3,7 @@
 
 struct console_cmdline
 {
-	char	name[8];			/* Name of the driver	    */
+	char	name[16];			/* Name of the driver	    */
 	int	index;				/* Minor dev. to use	    */
 	char	*options;			/* Options for the driver   */
 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index c06df7d..bb0635b 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1811,7 +1811,7 @@
 
 #ifdef CONFIG_KGDB_KDB
 	if (unlikely(kdb_trap_printk)) {
-		r = vkdb_printf(fmt, args);
+		r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
 		return r;
 	}
 #endif
@@ -2464,6 +2464,7 @@
 	for (i = 0, c = console_cmdline;
 	     i < MAX_CMDLINECONSOLES && c->name[0];
 	     i++, c++) {
+		BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
 		if (strcmp(c->name, newcon->name) != 0)
 			continue;
 		if (newcon->index >= 0 &&
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0d7bbe3..0a571e9 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -326,6 +326,7 @@
 	special = t->rcu_read_unlock_special;
 	if (special.b.need_qs) {
 		rcu_preempt_qs();
+		t->rcu_read_unlock_special.b.need_qs = false;
 		if (!t->rcu_read_unlock_special.s) {
 			local_irq_restore(flags);
 			return;
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 8a2e230..eae160d 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -87,8 +87,7 @@
 	 * so we don't have to move tasks around upon policy change,
 	 * or flail around trying to allocate bandwidth on the fly.
 	 * A bandwidth exception in __sched_setscheduler() allows
-	 * the policy change to proceed.  Thereafter, task_group()
-	 * returns &root_task_group, so zero bandwidth is required.
+	 * the policy change to proceed.
 	 */
 	free_rt_sched_group(tg);
 	tg->rt_se = root_task_group.rt_se;
@@ -115,9 +114,6 @@
 	if (tg != &root_task_group)
 		return false;
 
-	if (p->sched_class != &fair_sched_class)
-		return false;
-
 	/*
 	 * We can only assume the task group can't go away on us if
 	 * autogroup_move_group() can see us on ->thread_group list.
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 7052d3f..8d0f35d 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -274,7 +274,7 @@
 	 * first without taking the lock so we can
 	 * return early in the blocking case.
 	 */
-	if (!ACCESS_ONCE(x->done))
+	if (!READ_ONCE(x->done))
 		return 0;
 
 	spin_lock_irqsave(&x->wait.lock, flags);
@@ -297,6 +297,21 @@
  */
 bool completion_done(struct completion *x)
 {
-	return !!ACCESS_ONCE(x->done);
+	if (!READ_ONCE(x->done))
+		return false;
+
+	/*
+	 * If ->done, we need to wait for complete() to release ->wait.lock
+	 * otherwise we can end up freeing the completion before complete()
+	 * is done referencing it.
+	 *
+	 * The RMB pairs with complete()'s RELEASE of ->wait.lock and orders
+	 * the loads of ->done and ->wait.lock such that we cannot observe
+	 * the lock before complete() acquires it while observing the ->done
+	 * after it's acquired the lock.
+	 */
+	smp_rmb();
+	spin_unlock_wait(&x->wait.lock);
+	return true;
 }
 EXPORT_SYMBOL(completion_done);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 13049aa..f0f831e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -307,66 +307,6 @@
 int sysctl_sched_rt_runtime = 950000;
 
 /*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	lockdep_assert_held(&p->pi_lock);
-
-	for (;;) {
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-static void __task_rq_unlock(struct rq *rq)
-	__releases(rq->lock)
-{
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
-/*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
 static struct rq *this_rq_lock(void)
@@ -2899,7 +2839,7 @@
 	preempt_disable();
 }
 
-static void preempt_schedule_common(void)
+static void __sched notrace preempt_schedule_common(void)
 {
 	do {
 		__preempt_count_add(PREEMPT_ACTIVE);
@@ -4418,36 +4358,29 @@
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
-void __sched io_schedule(void)
-{
-	struct rq *rq = raw_rq();
-
-	delayacct_blkio_start();
-	atomic_inc(&rq->nr_iowait);
-	blk_flush_plug(current);
-	current->in_iowait = 1;
-	schedule();
-	current->in_iowait = 0;
-	atomic_dec(&rq->nr_iowait);
-	delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
-
 long __sched io_schedule_timeout(long timeout)
 {
-	struct rq *rq = raw_rq();
+	int old_iowait = current->in_iowait;
+	struct rq *rq;
 	long ret;
 
-	delayacct_blkio_start();
-	atomic_inc(&rq->nr_iowait);
-	blk_flush_plug(current);
 	current->in_iowait = 1;
+	if (old_iowait)
+		blk_schedule_flush_plug(current);
+	else
+		blk_flush_plug(current);
+
+	delayacct_blkio_start();
+	rq = raw_rq();
+	atomic_inc(&rq->nr_iowait);
 	ret = schedule_timeout(timeout);
-	current->in_iowait = 0;
+	current->in_iowait = old_iowait;
 	atomic_dec(&rq->nr_iowait);
 	delayacct_blkio_end();
+
 	return ret;
 }
+EXPORT_SYMBOL(io_schedule_timeout);
 
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
@@ -7642,6 +7575,12 @@
 {
 	struct task_struct *g, *p;
 
+	/*
+	 * Autogroups do not have RT tasks; see autogroup_create().
+	 */
+	if (task_group_is_autogroup(tg))
+		return 0;
+
 	for_each_process_thread(g, p) {
 		if (rt_task(p) && task_group(p) == tg)
 			return 1;
@@ -7734,6 +7673,17 @@
 {
 	int i, err = 0;
 
+	/*
+	 * Disallowing the root group RT runtime is BAD, it would disallow the
+	 * kernel creating (and or operating) RT threads.
+	 */
+	if (tg == &root_task_group && rt_runtime == 0)
+		return -EINVAL;
+
+	/* No period doesn't make any sense. */
+	if (rt_period == 0)
+		return -EINVAL;
+
 	mutex_lock(&rt_constraints_mutex);
 	read_lock(&tasklist_lock);
 	err = __rt_schedulable(tg, rt_period, rt_runtime);
@@ -7790,9 +7740,6 @@
 	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
 	rt_runtime = tg->rt_bandwidth.rt_runtime;
 
-	if (rt_period == 0)
-		return -EINVAL;
-
 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a027799..3fa8fa6 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -511,16 +511,10 @@
 						     struct sched_dl_entity,
 						     dl_timer);
 	struct task_struct *p = dl_task_of(dl_se);
+	unsigned long flags;
 	struct rq *rq;
-again:
-	rq = task_rq(p);
-	raw_spin_lock(&rq->lock);
 
-	if (rq != task_rq(p)) {
-		/* Task was moved, retrying. */
-		raw_spin_unlock(&rq->lock);
-		goto again;
-	}
+	rq = task_rq_lock(current, &flags);
 
 	/*
 	 * We need to take care of several possible races here:
@@ -541,6 +535,26 @@
 
 	sched_clock_tick();
 	update_rq_clock(rq);
+
+	/*
+	 * If the throttle happened during sched-out; like:
+	 *
+	 *   schedule()
+	 *     deactivate_task()
+	 *       dequeue_task_dl()
+	 *         update_curr_dl()
+	 *           start_dl_timer()
+	 *         __dequeue_task_dl()
+	 *     prev->on_rq = 0;
+	 *
+	 * We can be both throttled and !queued. Replenish the counter
+	 * but do not enqueue -- wait for our wakeup to do that.
+	 */
+	if (!task_on_rq_queued(p)) {
+		replenish_dl_entity(dl_se, dl_se);
+		goto unlock;
+	}
+
 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 	if (dl_task(rq->curr))
 		check_preempt_curr_dl(rq, p, 0);
@@ -555,7 +569,7 @@
 		push_dl_task(rq);
 #endif
 unlock:
-	raw_spin_unlock(&rq->lock);
+	task_rq_unlock(rq, current, &flags);
 
 	return HRTIMER_NORESTART;
 }
@@ -898,6 +912,7 @@
 		rq->curr->dl.dl_yielded = 1;
 		p->dl.runtime = 0;
 	}
+	update_rq_clock(rq);
 	update_curr_dl(rq);
 }
 
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 94b2d7b..80014a1 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -82,6 +82,7 @@
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
 	unsigned int broadcast;
+	bool reflect;
 
 	/*
 	 * Check if the idle task must be rescheduled. If it is the
@@ -105,6 +106,9 @@
 	 */
 	rcu_idle_enter();
 
+	if (cpuidle_not_available(drv, dev))
+		goto use_default;
+
 	/*
 	 * Suspend-to-idle ("freeze") is a system state in which all user space
 	 * has been frozen, all I/O devices have been suspended and the only
@@ -115,30 +119,24 @@
 	 * until a proper wakeup interrupt happens.
 	 */
 	if (idle_should_freeze()) {
-		cpuidle_enter_freeze();
-		local_irq_enable();
-		goto exit_idle;
-	}
-
-	/*
-	 * Ask the cpuidle framework to choose a convenient idle state.
-	 * Fall back to the default arch idle method on errors.
-	 */
-	next_state = cpuidle_select(drv, dev);
-	if (next_state < 0) {
-use_default:
-		/*
-		 * We can't use the cpuidle framework, let's use the default
-		 * idle routine.
-		 */
-		if (current_clr_polling_and_test())
+		entered_state = cpuidle_enter_freeze(drv, dev);
+		if (entered_state >= 0) {
 			local_irq_enable();
-		else
-			arch_cpu_idle();
+			goto exit_idle;
+		}
 
-		goto exit_idle;
+		reflect = false;
+		next_state = cpuidle_find_deepest_state(drv, dev);
+	} else {
+		reflect = true;
+		/*
+		 * Ask the cpuidle framework to choose a convenient idle state.
+		 */
+		next_state = cpuidle_select(drv, dev);
 	}
-
+	/* Fall back to the default arch idle method on errors. */
+	if (next_state < 0)
+		goto use_default;
 
 	/*
 	 * The idle task must be scheduled, it is pointless to
@@ -183,7 +181,8 @@
 	/*
 	 * Give the governor an opportunity to reflect on the outcome
 	 */
-	cpuidle_reflect(dev, entered_state);
+	if (reflect)
+		cpuidle_reflect(dev, entered_state);
 
 exit_idle:
 	__current_set_polling();
@@ -196,6 +195,19 @@
 
 	rcu_idle_exit();
 	start_critical_timings();
+	return;
+
+use_default:
+	/*
+	 * We can't use the cpuidle framework, let's use the default
+	 * idle routine.
+	 */
+	if (current_clr_polling_and_test())
+		local_irq_enable();
+	else
+		arch_cpu_idle();
+
+	goto exit_idle;
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0870db2..dc0f435 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1380,6 +1380,82 @@
 
 extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	lockdep_assert_held(&p->pi_lock);
+
+	for (;;) {
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	for (;;) {
+		raw_spin_lock_irqsave(&p->pi_lock, *flags);
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		/*
+		 *	move_queued_task()		task_rq_lock()
+		 *
+		 *	ACQUIRE (rq->lock)
+		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
+		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
+		 *	[S] ->cpu = new_cpu		[L] task_rq()
+		 *					[L] ->on_rq
+		 *	RELEASE (rq->lock)
+		 *
+		 * If we observe the old cpu in task_rq_lock, the acquire of
+		 * the old rq->lock will fully serialize against the stores.
+		 *
+		 * If we observe the new cpu in task_rq_lock, the acquire will
+		 * pair with the WMB to ensure we must then also see migrating.
+		 */
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+			return rq;
+		raw_spin_unlock(&rq->lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+	__releases(rq->lock)
+{
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
diff --git a/kernel/sys.c b/kernel/sys.c
index ea9c881..a03d9cd 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -97,6 +97,12 @@
 #ifndef MPX_DISABLE_MANAGEMENT
 # define MPX_DISABLE_MANAGEMENT(a)	(-EINVAL)
 #endif
+#ifndef GET_FP_MODE
+# define GET_FP_MODE(a)		(-EINVAL)
+#endif
+#ifndef SET_FP_MODE
+# define SET_FP_MODE(a,b)	(-EINVAL)
+#endif
 
 /*
  * this is where the system-wide overflow UID and GID are defined, for
@@ -1102,6 +1108,7 @@
 /*
  * Work around broken programs that cannot handle "Linux 3.0".
  * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
  */
 static int override_release(char __user *release, size_t len)
 {
@@ -1121,7 +1128,7 @@
 				break;
 			rest++;
 		}
-		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
 		copy = clamp_t(size_t, len, 1, sizeof(buf));
 		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
 		ret = copy_to_user(release, buf, copy + 1);
@@ -2219,6 +2226,12 @@
 			return -EINVAL;
 		error = MPX_DISABLE_MANAGEMENT(me);
 		break;
+	case PR_SET_FP_MODE:
+		error = SET_FP_MODE(me, arg2);
+		break;
+	case PR_GET_FP_MODE:
+		error = GET_FP_MODE(me);
+		break;
 	default:
 		error = -EINVAL;
 		break;
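
The override maps "4.x" onto the same 2.6 compatibility scheme already used
for 3.x, so a 4.0 kernel reports itself as 2.6.60 to programs that cannot
parse the new numbering. The arithmetic can be checked in isolation:

	#include <assert.h>
	#include <stdio.h>

	#define KERNEL_VERSION(a, b, c)	(((a) << 16) + ((b) << 8) + (c))

	int main(void)
	{
		unsigned int code = KERNEL_VERSION(4, 0, 0);
		unsigned int v = ((code >> 8) & 0xff) + 60;	/* as in override_release() */

		printf("4.0 is reported as 2.6.%u\n", v);
		assert(v == 60);
		return 0;
	}
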
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 4b585e0..0f60b08 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -633,10 +633,14 @@
 	if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
 		return -EPERM;
 
-	if (txc->modes & ADJ_FREQUENCY) {
-		if (LONG_MIN / PPM_SCALE > txc->freq)
+	/*
+	 * Check for potential multiplication overflows that can
+	 * only happen on 64-bit systems:
+	 */
+	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
+		if (LLONG_MIN / PPM_SCALE > txc->freq)
 			return -EINVAL;
-		if (LONG_MAX / PPM_SCALE < txc->freq)
+		if (LLONG_MAX / PPM_SCALE < txc->freq)
 			return -EINVAL;
 	}
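
Dividing the limit instead of multiplying the input is the standard way to
detect the overflow before it can happen; the same guard in stand-alone form
(PPM_SCALE here is just a stand-in constant, not the kernel's value):

	#include <limits.h>
	#include <stdio.h>

	#define PPM_SCALE 65536LL	/* illustrative value only */

	static int scale_checked(long long freq, long long *out)
	{
		if (freq > LLONG_MAX / PPM_SCALE || freq < LLONG_MIN / PPM_SCALE)
			return -1;	/* product would not fit in long long */
		*out = freq * PPM_SCALE;
		return 0;
	}

	int main(void)
	{
		long long r;

		printf("%d\n", scale_checked(1000, &r));		/* 0  */
		printf("%d\n", scale_checked(LLONG_MAX / 2, &r));	/* -1 */
		return 0;
	}
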
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 45e5cb1..4f22802 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1059,6 +1059,12 @@
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_graph_active;
+#else
+# define ftrace_graph_active 0
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static struct ftrace_ops *removed_ops;
@@ -2041,8 +2047,12 @@
 		if (!ftrace_rec_count(rec))
 			rec->flags = 0;
 		else
-			/* Just disable the record (keep REGS state) */
-			rec->flags &= ~FTRACE_FL_ENABLED;
+			/*
+			 * Just disable the record, but keep the ops TRAMP
+			 * and REGS states. The _EN flags must be disabled though.
+			 */
+			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
+					FTRACE_FL_REGS_EN);
 	}
 
 	return FTRACE_UPDATE_MAKE_NOP;
@@ -2688,24 +2698,36 @@
 
 static void ftrace_startup_sysctl(void)
 {
+	int command;
+
 	if (unlikely(ftrace_disabled))
 		return;
 
 	/* Force update next time */
 	saved_ftrace_func = NULL;
 	/* ftrace_start_up is true if we want ftrace running */
-	if (ftrace_start_up)
-		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+	if (ftrace_start_up) {
+		command = FTRACE_UPDATE_CALLS;
+		if (ftrace_graph_active)
+			command |= FTRACE_START_FUNC_RET;
+		ftrace_startup_enable(command);
+	}
 }
 
 static void ftrace_shutdown_sysctl(void)
 {
+	int command;
+
 	if (unlikely(ftrace_disabled))
 		return;
 
 	/* ftrace_start_up is true if ftrace is running */
-	if (ftrace_start_up)
-		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
+	if (ftrace_start_up) {
+		command = FTRACE_DISABLE_CALLS;
+		if (ftrace_graph_active)
+			command |= FTRACE_STOP_FUNC_RET;
+		ftrace_run_update_code(command);
+	}
 }
 
 static cycle_t		ftrace_update_time;
@@ -5558,12 +5580,12 @@
 
 	if (ftrace_enabled) {
 
-		ftrace_startup_sysctl();
-
 		/* we are starting ftrace again */
 		if (ftrace_ops_list != &ftrace_list_end)
 			update_ftrace_function();
 
+		ftrace_startup_sysctl();
+
 	} else {
 		/* stopping ftrace calls (just send to ftrace_stub) */
 		ftrace_trace_function = ftrace_stub;
@@ -5590,8 +5612,6 @@
 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
 };
 
-static int ftrace_graph_active;
-
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
 	return 0;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f288493..41ff75b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2728,19 +2728,57 @@
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+struct cwt_wait {
+	wait_queue_t		wait;
+	struct work_struct	*work;
+};
+
+static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+	if (cwait->work != key)
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, key);
+}
+
 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
+	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
 	unsigned long flags;
 	int ret;
 
 	do {
 		ret = try_to_grab_pending(work, is_dwork, &flags);
 		/*
-		 * If someone else is canceling, wait for the same event it
-		 * would be waiting for before retrying.
+		 * If someone else is already canceling, wait for it to
+		 * finish.  flush_work() doesn't work for PREEMPT_NONE
+		 * because we may get scheduled between @work's completion
+		 * and the other canceling task resuming and clearing
+		 * CANCELING - flush_work() will return false immediately
+		 * as @work is no longer busy, try_to_grab_pending() will
+		 * return -ENOENT as @work is still being canceled and the
+		 * other canceling task won't be able to clear CANCELING as
+		 * we're hogging the CPU.
+		 *
+		 * Let's wait for completion using a waitqueue.  As this
+		 * may lead to the thundering herd problem, use a custom
+		 * wake function which matches @work along with exclusive
+		 * wait and wakeup.
 		 */
-		if (unlikely(ret == -ENOENT))
-			flush_work(work);
+		if (unlikely(ret == -ENOENT)) {
+			struct cwt_wait cwait;
+
+			init_wait(&cwait.wait);
+			cwait.wait.func = cwt_wakefn;
+			cwait.work = work;
+
+			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
+						  TASK_UNINTERRUPTIBLE);
+			if (work_is_canceling(work))
+				schedule();
+			finish_wait(&cancel_waitq, &cwait.wait);
+		}
 	} while (unlikely(ret < 0));
 
 	/* tell other tasks trying to grab @work to back off */
@@ -2749,6 +2787,16 @@
 
 	flush_work(work);
 	clear_work_data(work);
+
+	/*
+	 * Paired with prepare_to_wait() above so that either
+	 * waitqueue_active() is visible here or !work_is_canceling() is
+	 * visible there.
+	 */
+	smp_mb();
+	if (waitqueue_active(&cancel_waitq))
+		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
+
 	return ret;
 }
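
The fix waits on a shared waitqueue with a wake function that only matches
the work item passed as the wake-up key, so concurrent cancellations of
different items do not wake each other. The pattern in isolation, with an
invented object type (a sketch, not the workqueue code itself):

	#include <linux/wait.h>
	#include <linux/sched.h>

	static DECLARE_WAIT_QUEUE_HEAD(obj_waitq);

	struct obj_wait {
		wait_queue_t	wait;
		void		*obj;	/* what this waiter is waiting for */
	};

	static int obj_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
	{
		struct obj_wait *w = container_of(wait, struct obj_wait, wait);

		if (w->obj != key)	/* not our object: stay asleep */
			return 0;
		return autoremove_wake_function(wait, mode, sync, key);
	}

	static void wait_for_obj(void *obj, bool (*busy)(void *))
	{
		struct obj_wait w = { .obj = obj };

		init_wait(&w.wait);
		w.wait.func = obj_wakefn;
		prepare_to_wait_exclusive(&obj_waitq, &w.wait, TASK_UNINTERRUPTIBLE);
		if (busy(obj))
			schedule();
		finish_wait(&obj_waitq, &w.wait);
	}

	/* The releasing side wakes exactly one matching waiter with:
	 *	__wake_up(&obj_waitq, TASK_NORMAL, 1, obj);
	 */
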
 
diff --git a/lib/Makefile b/lib/Makefile
index 87eb3bf..58f74d2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -24,7 +24,7 @@
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
-	 gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \
+	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
 obj-y += string_helpers.o
diff --git a/mm/iov_iter.c b/lib/iov_iter.c
similarity index 97%
rename from mm/iov_iter.c
rename to lib/iov_iter.c
index 8277320..9d96e283 100644
--- a/mm/iov_iter.c
+++ b/lib/iov_iter.c
@@ -751,3 +751,18 @@
 	return npages;
 }
 EXPORT_SYMBOL(iov_iter_npages);
+
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
+{
+	*new = *old;
+	if (new->type & ITER_BVEC)
+		return new->bvec = kmemdup(new->bvec,
+				    new->nr_segs * sizeof(struct bio_vec),
+				    flags);
+	else
+		/* iovec and kvec have identical layout */
+		return new->iov = kmemdup(new->iov,
+				   new->nr_segs * sizeof(struct iovec),
+				   flags);
+}
+EXPORT_SYMBOL(dup_iter);
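
dup_iter() copies the iterator head and kmemdup()s its segment array, so the
caller ends up with an iov_iter it can advance independently of the original;
the returned pointer is the duplicated array and must be kfree()d when done.
A hedged in-kernel usage sketch (the surrounding function is invented):

	#include <linux/uio.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	static int stash_iter_copy(struct iov_iter *src, struct iov_iter *saved)
	{
		const void *segs = dup_iter(saved, src, GFP_KERNEL);

		if (!segs)
			return -ENOMEM;	/* kmemdup() of the iovec/bvec array failed */

		/* ... use 'saved' without disturbing 'src' ... */

		kfree(segs);		/* releases the duplicated segment array */
		return 0;
	}
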
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9cc4c4a..b5344ef 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -217,15 +218,15 @@
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       size_t nbuckets)
 {
-	struct bucket_table *tbl;
+	struct bucket_table *tbl = NULL;
 	size_t size;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
 	if (tbl == NULL)
 		tbl = vzalloc(size);
-
 	if (tbl == NULL)
 		return NULL;
 
@@ -247,26 +248,24 @@
  * @ht:		hash table
  * @new_size:	new table size
  */
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
 	/* Expand table when exceeding 75% load */
 	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
+	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
 }
-EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht:		hash table
  * @new_size:	new table size
  */
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
 	/* Shrink table beneath 30% load */
 	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
 	       (atomic_read(&ht->shift) > ht->p.min_shift);
 }
-EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
 static void lock_buckets(struct bucket_table *new_tbl,
 			 struct bucket_table *old_tbl, unsigned int hash)
@@ -414,6 +413,7 @@
 			}
 		}
 		unlock_buckets(new_tbl, old_tbl, new_hash);
+		cond_resched();
 	}
 
 	/* Unzip interleaved hash chains */
@@ -437,6 +437,7 @@
 				complete = false;
 
 			unlock_buckets(new_tbl, old_tbl, old_hash);
+			cond_resched();
 		}
 	}
 
@@ -495,6 +496,7 @@
 				   tbl->buckets[new_hash + new_tbl->size]);
 
 		unlock_buckets(new_tbl, tbl, new_hash);
+		cond_resched();
 	}
 
 	/* Publish the new, valid hash table */
@@ -528,31 +530,19 @@
 	list_for_each_entry(walker, &ht->walkers, list)
 		walker->resize = true;
 
-	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+	if (rht_grow_above_75(ht, tbl->size))
 		rhashtable_expand(ht);
-	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+	else if (rht_shrink_below_30(ht, tbl->size))
 		rhashtable_shrink(ht);
-
 unlock:
 	mutex_unlock(&ht->mutex);
 }
 
-static void rhashtable_wakeup_worker(struct rhashtable *ht)
-{
-	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	size_t size = tbl->size;
-
-	/* Only adjust the table if no resizing is currently in progress. */
-	if (tbl == new_tbl &&
-	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
-	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
-		schedule_work(&ht->run_work);
-}
-
 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-				struct bucket_table *tbl, u32 hash)
+				struct bucket_table *tbl,
+				const struct bucket_table *old_tbl, u32 hash)
 {
+	bool no_resize_running = tbl == old_tbl;
 	struct rhash_head *head;
 
 	hash = rht_bucket_index(tbl, hash);
@@ -568,8 +558,8 @@
 	rcu_assign_pointer(tbl->buckets[hash], obj);
 
 	atomic_inc(&ht->nelems);
-
-	rhashtable_wakeup_worker(ht);
+	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+		schedule_work(&ht->run_work);
 }
 
 /**
@@ -599,7 +589,7 @@
 	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
 	lock_buckets(tbl, old_tbl, hash);
-	__rhashtable_insert(ht, obj, tbl, hash);
+	__rhashtable_insert(ht, obj, tbl, old_tbl, hash);
 	unlock_buckets(tbl, old_tbl, hash);
 
 	rcu_read_unlock();
@@ -681,8 +671,11 @@
 	unlock_buckets(new_tbl, old_tbl, new_hash);
 
 	if (ret) {
+		bool no_resize_running = new_tbl == old_tbl;
+
 		atomic_dec(&ht->nelems);
-		rhashtable_wakeup_worker(ht);
+		if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
+			schedule_work(&ht->run_work);
 	}
 
 	rcu_read_unlock();
@@ -852,7 +845,7 @@
 		goto exit;
 	}
 
-	__rhashtable_insert(ht, obj, new_tbl, new_hash);
+	__rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
 
 exit:
 	unlock_buckets(new_tbl, old_tbl, new_hash);
@@ -894,6 +887,9 @@
 	if (!iter->walker)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&iter->walker->list);
+	iter->walker->resize = false;
+
 	mutex_lock(&ht->mutex);
 	list_add(&iter->walker->list, &ht->walkers);
 	mutex_unlock(&ht->mutex);
@@ -1111,8 +1107,7 @@
 	if (!ht->p.hash_rnd)
 		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		INIT_WORK(&ht->run_work, rht_deferred_worker);
+	INIT_WORK(&ht->run_work, rht_deferred_worker);
 
 	return 0;
 }
@@ -1130,8 +1125,7 @@
 {
 	ht->being_destroyed = true;
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		cancel_work_sync(&ht->run_work);
+	cancel_work_sync(&ht->run_work);
 
 	mutex_lock(&ht->mutex);
 	bucket_table_free(rht_dereference(ht->tbl, ht));
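
The 75%/30% load-factor decisions that are now internal to lib/rhashtable.c are plain integer checks; a standalone userspace sketch with made-up sizes (the kernel versions additionally honour the min_shift/max_shift limits):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the arithmetic of rht_grow_above_75()/rht_shrink_below_30(). */
static bool grow_above_75(unsigned int nelems, unsigned int size)
{
	return nelems > size / 4 * 3;		/* expand past 75% load */
}

static bool shrink_below_30(unsigned int nelems, unsigned int size)
{
	return nelems < size * 3 / 10;		/* shrink below 30% load */
}

int main(void)
{
	printf("48/64 grows:   %d\n", grow_above_75(48, 64));	/* 0: exactly 75% */
	printf("49/64 grows:   %d\n", grow_above_75(49, 64));	/* 1 */
	printf("19/64 shrinks: %d\n", shrink_below_30(19, 64));	/* 0: 192/10 == 19 */
	printf("18/64 shrinks: %d\n", shrink_below_30(18, 64));	/* 1 */
	return 0;
}
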
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 88c0854..5c94e10 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -61,7 +61,7 @@
 
 	if (s->len < s->size) {
 		len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
-		if (seq_buf_can_fit(s, len)) {
+		if (s->len + len < s->size) {
 			s->len += len;
 			return 0;
 		}
@@ -118,7 +118,7 @@
 
 	if (s->len < s->size) {
 		ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
-		if (seq_buf_can_fit(s, ret)) {
+		if (s->len + ret < s->size) {
 			s->len += ret;
 			return 0;
 		}
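
A standalone userspace sketch of the open-coded bounds check used above; the buffer size and the overflow handling here are made up. The strict '<' keeps room for the terminating NUL, which vsnprintf() writes but does not count in its return value:

#include <stdarg.h>
#include <stdio.h>

struct seq_buf { char buffer[32]; size_t len, size; };

static int sb_printf(struct seq_buf *s, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
	va_end(args);

	if (s->len + len < s->size) {	/* result fit, commit it */
		s->len += len;
		return 0;
	}
	s->len = s->size;		/* mark the buffer as overflowed */
	return -1;
}

int main(void)
{
	struct seq_buf s = { .size = sizeof(s.buffer) };

	printf("%d\n", sb_printf(&s, "%s", "hello"));	/* 0 */
	printf("%d\n", sb_printf(&s, "%040d", 1));	/* -1: would not fit */
	return 0;
}
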
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 1dfeba7..67c7593 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -191,18 +191,18 @@
 	return err;
 }
 
+static struct rhashtable ht;
+
 static int __init test_rht_init(void)
 {
-	struct rhashtable ht;
 	struct rhashtable_params params = {
 		.nelem_hint = TEST_HT_SIZE,
 		.head_offset = offsetof(struct test_obj, node),
 		.key_offset = offsetof(struct test_obj, value),
 		.key_len = sizeof(int),
 		.hashfn = jhash,
+		.max_shift = 1, /* we expand/shrink manually here */
 		.nulls_base = (3U << RHT_BASE_SHIFT),
-		.grow_decision = rht_grow_above_75,
-		.shrink_decision = rht_shrink_below_30,
 	};
 	int err;
 
@@ -222,6 +222,11 @@
 	return err;
 }
 
+static void __exit test_rht_exit(void)
+{
+}
+
 module_init(test_rht_init);
+module_exit(test_rht_exit);
 
 MODULE_LICENSE("GPL v2");
diff --git a/mm/Makefile b/mm/Makefile
index 3c1caa2..15dbe99 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,7 +21,7 @@
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o vmacache.o \
 			   interval_tree.o list_lru.o workingset.o \
-			   iov_iter.o debug.o $(mmu-y)
+			   debug.o $(mmu-y)
 
 obj-y += init-mm.o
 
diff --git a/mm/cma.c b/mm/cma.c
index 75016fd..68ecb7a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -64,15 +64,17 @@
 	return (1UL << (align_order - cma->order_per_bit)) - 1;
 }
 
+/*
+ * Find a PFN aligned to the specified order and return an offset represented in
+ * order_per_bits.
+ */
 static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
 {
-	unsigned int alignment;
-
 	if (align_order <= cma->order_per_bit)
 		return 0;
-	alignment = 1UL << (align_order - cma->order_per_bit);
-	return ALIGN(cma->base_pfn, alignment) -
-		(cma->base_pfn >> cma->order_per_bit);
+
+	return (ALIGN(cma->base_pfn, (1UL << align_order))
+		- cma->base_pfn) >> cma->order_per_bit;
 }
 
 static unsigned long cma_bitmap_maxno(struct cma *cma)
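
A userspace sketch of the corrected offset computation, with made-up base_pfn and orders; ALIGN() here is the usual power-of-two round-up:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static unsigned long aligned_offset(unsigned long base_pfn,
				    unsigned int order_per_bit,
				    unsigned int align_order)
{
	if (align_order <= order_per_bit)
		return 0;
	return (ALIGN(base_pfn, 1UL << align_order) - base_pfn) >> order_per_bit;
}

int main(void)
{
	/* base_pfn 0x2f00 rounded up to a 2^10-page boundary is 0x3000,
	 * so the first usable bit sits 0x100 bits into the bitmap when
	 * order_per_bit == 0. */
	printf("offset = 0x%lx\n", aligned_offset(0x2f00, 0, 10));
	return 0;
}
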
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fc00c8c..626e93d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1295,8 +1295,13 @@
 	 * Avoid grouping on DSO/COW pages in specific and RO pages
 	 * in general, RO pages shouldn't hurt as much anyway since
 	 * they can be in shared cache state.
+	 *
+	 * FIXME! This checks "pmd_dirty()" as an approximation of
+	 * "is this a read-only page", since checking "pmd_write()"
+	 * is even more broken. We haven't actually turned this into
+	 * a writable page, so pmd_write() will always be false.
 	 */
-	if (!pmd_write(pmd))
+	if (!pmd_dirty(pmd))
 		flags |= TNF_NO_GROUP;
 
 	/*
@@ -1482,6 +1487,7 @@
 
 	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		pmd_t entry;
+		ret = 1;
 
 		/*
 		 * Avoid trapping faults against the zero page. The read-only
@@ -1490,11 +1496,10 @@
 		 */
 		if (prot_numa && is_huge_zero_pmd(*pmd)) {
 			spin_unlock(ptl);
-			return 0;
+			return ret;
 		}
 
 		if (!prot_numa || !pmd_protnone(*pmd)) {
-			ret = 1;
 			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
 			entry = pmd_modify(entry, newprot);
 			ret = HPAGE_PMD_NR;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a9ac6c..c41b2a0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -917,7 +917,6 @@
 	__SetPageHead(page);
 	__ClearPageReserved(page);
 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
-		__SetPageTail(p);
 		/*
 		 * For gigantic hugepages allocated through bootmem at
 		 * boot, it's safer to be consistent with the not-gigantic
@@ -933,6 +932,9 @@
 		__ClearPageReserved(p);
 		set_page_count(p, 0);
 		p->first_page = page;
+		/* Make sure p->first_page is always valid for PageTail() */
+		smp_wmb();
+		__SetPageTail(p);
 	}
 }
 
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 78fee63..936d816 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -29,6 +29,7 @@
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>
 #include <linux/kasan.h>
 
 #include "kasan.h"
@@ -414,12 +415,19 @@
 			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
 			__builtin_return_address(0));
-	return ret ? 0 : -ENOMEM;
+
+	if (ret) {
+		find_vm_area(addr)->flags |= VM_KASAN;
+		return 0;
+	}
+
+	return -ENOMEM;
 }
 
-void kasan_module_free(void *addr)
+void kasan_free_shadow(const struct vm_struct *vm)
 {
-	vfree(kasan_mem_to_shadow(addr));
+	if (vm->flags & VM_KASAN)
+		vfree(kasan_mem_to_shadow(vm->addr));
 }
 
 static void register_global(struct kasan_global *global)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d18d3a6..b34ef4a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5232,7 +5232,9 @@
 	 * on for the root memcg is enough.
 	 */
 	if (cgroup_on_dfl(root_css->cgroup))
-		mem_cgroup_from_css(root_css)->use_hierarchy = true;
+		root_mem_cgroup->use_hierarchy = true;
+	else
+		root_mem_cgroup->use_hierarchy = false;
 }
 
 static u64 memory_current_read(struct cgroup_subsys_state *css,
@@ -5247,7 +5249,7 @@
 	unsigned long low = ACCESS_ONCE(memcg->low);
 
 	if (low == PAGE_COUNTER_MAX)
-		seq_puts(m, "infinity\n");
+		seq_puts(m, "max\n");
 	else
 		seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
 
@@ -5262,7 +5264,7 @@
 	int err;
 
 	buf = strstrip(buf);
-	err = page_counter_memparse(buf, "infinity", &low);
+	err = page_counter_memparse(buf, "max", &low);
 	if (err)
 		return err;
 
@@ -5277,7 +5279,7 @@
 	unsigned long high = ACCESS_ONCE(memcg->high);
 
 	if (high == PAGE_COUNTER_MAX)
-		seq_puts(m, "infinity\n");
+		seq_puts(m, "max\n");
 	else
 		seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
 
@@ -5292,7 +5294,7 @@
 	int err;
 
 	buf = strstrip(buf);
-	err = page_counter_memparse(buf, "infinity", &high);
+	err = page_counter_memparse(buf, "max", &high);
 	if (err)
 		return err;
 
@@ -5307,7 +5309,7 @@
 	unsigned long max = ACCESS_ONCE(memcg->memory.limit);
 
 	if (max == PAGE_COUNTER_MAX)
-		seq_puts(m, "infinity\n");
+		seq_puts(m, "max\n");
 	else
 		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
 
@@ -5322,7 +5324,7 @@
 	int err;
 
 	buf = strstrip(buf);
-	err = page_counter_memparse(buf, "infinity", &max);
+	err = page_counter_memparse(buf, "max", &max);
 	if (err)
 		return err;
 
@@ -5426,7 +5428,7 @@
 	if (memcg == root_mem_cgroup)
 		return false;
 
-	if (page_counter_read(&memcg->memory) > memcg->low)
+	if (page_counter_read(&memcg->memory) >= memcg->low)
 		return false;
 
 	while (memcg != root) {
@@ -5435,7 +5437,7 @@
 		if (memcg == root_mem_cgroup)
 			break;
 
-		if (page_counter_read(&memcg->memory) > memcg->low)
+		if (page_counter_read(&memcg->memory) >= memcg->low)
 			return false;
 	}
 	return true;
diff --git a/mm/memory.c b/mm/memory.c
index 8068893..411144f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3072,8 +3072,13 @@
 	 * Avoid grouping on DSO/COW pages in specific and RO pages
 	 * in general, RO pages shouldn't hurt as much anyway since
 	 * they can be in shared cache state.
+	 *
+	 * FIXME! This checks "pte_dirty()" as an approximation of
+	 * "is this a read-only page", since checking "pte_write()"
+	 * is even more broken. We haven't actually turned this into
+	 * a writable page, so pte_write() will always be false.
 	 */
-	if (!pte_write(pte))
+	if (!pte_dirty(pte))
 		flags |= TNF_NO_GROUP;
 
 	/*
diff --git a/mm/mlock.c b/mm/mlock.c
index 73cf098..8a54cd2 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -26,10 +26,10 @@
 
 int can_do_mlock(void)
 {
-	if (capable(CAP_IPC_LOCK))
-		return 1;
 	if (rlimit(RLIMIT_MEMLOCK) != 0)
 		return 1;
+	if (capable(CAP_IPC_LOCK))
+		return 1;
 	return 0;
 }
 EXPORT_SYMBOL(can_do_mlock);
diff --git a/mm/nommu.c b/mm/nommu.c
index 7296360..3fba2dc9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -62,6 +62,7 @@
 EXPORT_SYMBOL(high_memory);
 struct page *mem_map;
 unsigned long max_mapnr;
+EXPORT_SYMBOL(max_mapnr);
 unsigned long highest_memmap_pfn;
 struct percpu_counter vm_committed_as;
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
@@ -1213,11 +1214,9 @@
 	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
 		total = point;
 		kdebug("try to alloc exact %lu pages", total);
-		base = alloc_pages_exact(len, GFP_KERNEL);
-	} else {
-		base = (void *)__get_free_pages(GFP_KERNEL, order);
 	}
 
+	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
 	if (!base)
 		goto enomem;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a47f0b2..40e2942 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2353,8 +2353,15 @@
 		if (ac->high_zoneidx < ZONE_NORMAL)
 			goto out;
 		/* The OOM killer does not compensate for light reclaim */
-		if (!(gfp_mask & __GFP_FS))
+		if (!(gfp_mask & __GFP_FS)) {
+			/*
+			 * XXX: Page reclaim didn't yield anything,
+			 * and the OOM killer can't be invoked, but
+			 * keep looping as per should_alloc_retry().
+			 */
+			*did_some_progress = 1;
 			goto out;
+		}
 		/*
 		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
 		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
@@ -2366,7 +2373,8 @@
 			goto out;
 	}
 	/* Exhausted what can be done so it's blamo time */
-	if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false))
+	if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
+			|| WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
 		*did_some_progress = 1;
 out:
 	oom_zonelist_unlock(ac->zonelist, gfp_mask);
diff --git a/mm/shmem.c b/mm/shmem.c
index a63031f..cf2d0ca 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1455,6 +1455,9 @@
 
 bool shmem_mapping(struct address_space *mapping)
 {
+	if (!mapping->host)
+		return false;
+
 	return mapping->host->i_sb->s_op == &shmem_ops;
 }
 
@@ -2319,8 +2322,8 @@
 
 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
 {
-	bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
-	bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode);
+	bool old_is_dir = d_is_dir(old_dentry);
+	bool new_is_dir = d_is_dir(new_dentry);
 
 	if (old_dir != new_dir && old_is_dir != new_is_dir) {
 		if (old_is_dir) {
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 35b25e1..49abccf 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1418,6 +1418,7 @@
 		spin_unlock(&vmap_area_lock);
 
 		vmap_debug_free_range(va->va_start, va->va_end);
+		kasan_free_shadow(vm);
 		free_unmap_vmap_area(va);
 		vm->size -= PAGE_SIZE;
 
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index d8e376a..36a1a73 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -658,14 +658,30 @@
 static void p9_virtio_remove(struct virtio_device *vdev)
 {
 	struct virtio_chan *chan = vdev->priv;
-
-	if (chan->inuse)
-		p9_virtio_close(chan->client);
-	vdev->config->del_vqs(vdev);
+	unsigned long warning_time;
 
 	mutex_lock(&virtio_9p_lock);
+
+	/* Remove self from list so we don't get new users. */
 	list_del(&chan->chan_list);
+	warning_time = jiffies;
+
+	/* Wait for existing users to close. */
+	while (chan->inuse) {
+		mutex_unlock(&virtio_9p_lock);
+		msleep(250);
+		if (time_after(jiffies, warning_time + 10 * HZ)) {
+			dev_emerg(&vdev->dev,
+				  "p9_virtio_remove: waiting for device in use.\n");
+			warning_time = jiffies;
+		}
+		mutex_lock(&virtio_9p_lock);
+	}
+
 	mutex_unlock(&virtio_9p_lock);
+
+	vdev->config->del_vqs(vdev);
+
 	sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
 	kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
 	kfree(chan->tag);
diff --git a/net/bridge/br.c b/net/bridge/br.c
index fb57ab6..02c24cf 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -190,6 +190,8 @@
 {
 	int err;
 
+	BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
+
 	err = stp_proto_register(&br_stp_proto);
 	if (err < 0) {
 		pr_err("bridge: can't register sap for STP\n");
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index b087d27..1849d96 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -563,6 +563,8 @@
 	 */
 	del_nbp(p);
 
+	dev_set_mtu(br->dev, br_min_mtu(br));
+
 	spin_lock_bh(&br->lock);
 	changed_addr = br_stp_recalculate_bridge_id(br);
 	spin_unlock_bh(&br->lock);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 769b185..a6e2da0 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -281,7 +281,7 @@
 	int copylen;
 
 	ret = -EOPNOTSUPP;
-	if (m->msg_flags&MSG_OOB)
+	if (flags & MSG_OOB)
 		goto read_error;
 
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index 8bc7caa..434ba85 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -84,7 +84,7 @@
 	u16 tmp;
 	u16 len;
 	u16 hdrchks;
-	u16 pktchks;
+	int pktchks;
 	struct cffrml *this;
 	this = container_obj(layr);
 
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 1be0b52..f6c3b21 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -255,9 +255,9 @@
 	return skb->len;
 }
 
-inline u16 cfpkt_iterate(struct cfpkt *pkt,
-			 u16 (*iter_func)(u16, void *, u16),
-			 u16 data)
+int cfpkt_iterate(struct cfpkt *pkt,
+		  u16 (*iter_func)(u16, void *, u16),
+		  u16 data)
 {
 	/*
 	 * Don't care about the performance hit of linearizing,
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 66e0804..32d710e 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -259,6 +259,9 @@
 		goto inval_skb;
 	}
 
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
 
diff --git a/net/compat.c b/net/compat.c
index 3236b41..94d3d5e 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -711,24 +711,18 @@
 
 COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
 {
-	if (flags & MSG_CMSG_COMPAT)
-		return -EINVAL;
 	return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
 COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
 		       unsigned int, vlen, unsigned int, flags)
 {
-	if (flags & MSG_CMSG_COMPAT)
-		return -EINVAL;
 	return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 			      flags | MSG_CMSG_COMPAT);
 }
 
 COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
 {
-	if (flags & MSG_CMSG_COMPAT)
-		return -EINVAL;
 	return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
@@ -751,9 +745,6 @@
 	int datagrams;
 	struct timespec ktspec;
 
-	if (flags & MSG_CMSG_COMPAT)
-		return -EINVAL;
-
 	if (timeout == NULL)
 		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 				      flags | MSG_CMSG_COMPAT, NULL);
diff --git a/net/core/dev.c b/net/core/dev.c
index 8f9710c..962ee9d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -946,7 +946,7 @@
 		return false;
 
 	while (*name) {
-		if (*name == '/' || isspace(*name))
+		if (*name == '/' || *name == ':' || isspace(*name))
 			return false;
 		name++;
 	}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 91f74f3..aa378ec 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -98,6 +98,7 @@
 	[NETIF_F_RXALL_BIT] =            "rx-all",
 	[NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
 	[NETIF_F_BUSY_POLL_BIT] =        "busy-poll",
+	[NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload",
 };
 
 static const char
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 0c08062..1e2f46a 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -32,6 +32,9 @@
 	return 0;
 
 nla_put_failure:
+	kfree(d->xstats);
+	d->xstats = NULL;
+	d->xstats_len = 0;
 	spin_unlock_bh(d->lock);
 	return -1;
 }
@@ -305,7 +308,9 @@
 gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
 {
 	if (d->compat_xstats) {
-		d->xstats = st;
+		d->xstats = kmemdup(st, len, GFP_ATOMIC);
+		if (!d->xstats)
+			goto err_out;
 		d->xstats_len = len;
 	}
 
@@ -313,6 +318,11 @@
 		return gnet_stats_copy(d, TCA_STATS_APP, st, len);
 
 	return 0;
+
+err_out:
+	d->xstats_len = 0;
+	spin_unlock_bh(d->lock);
+	return -1;
 }
 EXPORT_SYMBOL(gnet_stats_copy_app);
 
@@ -345,6 +355,9 @@
 			return -1;
 	}
 
+	kfree(d->xstats);
+	d->xstats = NULL;
+	d->xstats_len = 0;
 	spin_unlock_bh(d->lock);
 	return 0;
 }
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b4899f5b..508155b 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1134,6 +1134,9 @@
 			return len;
 
 		i += len;
+		if ((value > 1) &&
+		    (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
+			return -ENOTSUPP;
 		pkt_dev->burst = value < 1 ? 1 : value;
 		sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
 		return count;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ab293a3..ee0608b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1300,7 +1300,6 @@
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
 
-	rcu_read_lock();
 	cb->seq = net->dev_base_seq;
 
 	/* A hack to preserve kernel<->userspace interface.
@@ -1322,7 +1321,7 @@
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
-		hlist_for_each_entry_rcu(dev, head, index_hlist) {
+		hlist_for_each_entry(dev, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
 			err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1344,7 +1343,6 @@
 		}
 	}
 out:
-	rcu_read_unlock();
 	cb->args[1] = idx;
 	cb->args[0] = h;
 
@@ -2012,8 +2010,8 @@
 	}
 
 	if (1) {
-		struct nlattr *attr[ops ? ops->maxtype + 1 : 0];
-		struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0];
+		struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
+		struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
 		struct nlattr **data = NULL;
 		struct nlattr **slave_data = NULL;
 		struct net *dest_net, *link_net = NULL;
@@ -2122,6 +2120,10 @@
 		if (IS_ERR(dest_net))
 			return PTR_ERR(dest_net);
 
+		err = -EPERM;
+		if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
+			goto out;
+
 		if (tb[IFLA_LINK_NETNSID]) {
 			int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
 
@@ -2130,6 +2132,9 @@
 				err =  -EINVAL;
 				goto out;
 			}
+			err = -EPERM;
+			if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
+				goto out;
 		}
 
 		dev = rtnl_create_link(link_net ? : dest_net, ifname,
@@ -2161,28 +2166,28 @@
 			}
 		}
 		err = rtnl_configure_link(dev, ifm);
-		if (err < 0) {
-			if (ops->newlink) {
-				LIST_HEAD(list_kill);
-
-				ops->dellink(dev, &list_kill);
-				unregister_netdevice_many(&list_kill);
-			} else {
-				unregister_netdevice(dev);
-			}
-			goto out;
-		}
-
+		if (err < 0)
+			goto out_unregister;
 		if (link_net) {
 			err = dev_change_net_namespace(dev, dest_net, ifname);
 			if (err < 0)
-				unregister_netdevice(dev);
+				goto out_unregister;
 		}
 out:
 		if (link_net)
 			put_net(link_net);
 		put_net(dest_net);
 		return err;
+out_unregister:
+		if (ops->newlink) {
+			LIST_HEAD(list_kill);
+
+			ops->dellink(dev, &list_kill);
+			unregister_netdevice_many(&list_kill);
+		} else {
+			unregister_netdevice(dev);
+		}
+		goto out;
 	}
 }
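
The "ops ? ops->maxtype + 1 : 1" change above avoids a zero-length variable-length array, which is not valid C; a standalone illustration with made-up values:

#include <stdio.h>

static void demo(int maxtype)
{
	/* With ": 0" this could become a zero-sized VLA (undefined
	 * behaviour); the ": 1" fallback guarantees at least one slot. */
	int tb[maxtype ? maxtype + 1 : 1];

	tb[0] = 0;
	printf("table holds %zu entries\n", sizeof(tb) / sizeof(tb[0]));
}

int main(void)
{
	demo(0);	/* 1 entry  */
	demo(4);	/* 5 entries */
	return 0;
}
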
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 88c613e..8e4ac97 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3621,13 +3621,14 @@
 {
 	struct sk_buff_head *q = &sk->sk_error_queue;
 	struct sk_buff *skb, *skb_next;
+	unsigned long flags;
 	int err = 0;
 
-	spin_lock_bh(&q->lock);
+	spin_lock_irqsave(&q->lock, flags);
 	skb = __skb_dequeue(q);
 	if (skb && (skb_next = skb_peek(q)))
 		err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
-	spin_unlock_bh(&q->lock);
+	spin_unlock_irqrestore(&q->lock, flags);
 
 	sk->sk_err = err;
 	if (err)
@@ -3732,9 +3733,13 @@
 		     struct sock *sk, int tstype)
 {
 	struct sk_buff *skb;
-	bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+	bool tsonly;
 
-	if (!sk || !skb_may_tx_timestamp(sk, tsonly))
+	if (!sk)
+		return;
+
+	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+	if (!skb_may_tx_timestamp(sk, tsonly))
 		return;
 
 	if (tsonly)
@@ -4172,7 +4177,7 @@
 	skb->ignore_df = 0;
 	skb_dst_drop(skb);
 	skb->mark = 0;
-	skb->sender_cpu = 0;
+	skb_sender_cpu_clear(skb);
 	skb_init_secmark(skb);
 	secpath_reset(skb);
 	nf_reset(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index 93c8b20..78e89eb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1655,6 +1655,10 @@
 }
 EXPORT_SYMBOL(sock_rfree);
 
+/*
+ * Buffer destructor for skbs that are not used directly in read or write
+ * path, e.g. for error handler skbs. Automatically called from kfree_skb.
+ */
 void sock_efree(struct sk_buff *skb)
 {
 	sock_put(skb->sk);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 4334248..8ce351f 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -25,6 +25,8 @@
 static int zero = 0;
 static int one = 1;
 static int ushort_max = USHRT_MAX;
+static int min_sndbuf = SOCK_MIN_SNDBUF;
+static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 static int net_msg_warn;	/* Unused, but still a sysctl */
 
@@ -237,7 +239,7 @@
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
+		.extra1		= &min_sndbuf,
 	},
 	{
 		.procname	= "rmem_max",
@@ -245,7 +247,7 @@
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
+		.extra1		= &min_rcvbuf,
 	},
 	{
 		.procname	= "wmem_default",
@@ -253,7 +255,7 @@
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
+		.extra1		= &min_sndbuf,
 	},
 	{
 		.procname	= "rmem_default",
@@ -261,7 +263,7 @@
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &one,
+		.extra1		= &min_rcvbuf,
 	},
 	{
 		.procname	= "dev_weight",
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 1d7c125..3b81092 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1062,7 +1062,7 @@
 	if (decnet_debug_level & 16)
 		printk(KERN_DEBUG
 		       "dn_route_output_slow: initial checks complete."
-		       " dst=%o4x src=%04x oif=%d try_hard=%d\n",
+		       " dst=%04x src=%04x oif=%d try_hard=%d\n",
 		       le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
 		       fld.flowidn_oif, try_hard);
 
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index a138d75..44d2746 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -359,8 +359,11 @@
 	struct hsr_port *port;
 
 	hsr = netdev_priv(hsr_dev);
+
+	rtnl_lock();
 	hsr_for_each_port(hsr, port)
 		hsr_del_port(port);
+	rtnl_unlock();
 
 	del_timer_sync(&hsr->prune_timer);
 	del_timer_sync(&hsr->announce_timer);
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 779d28b..cd37d001 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -36,6 +36,10 @@
 			return NOTIFY_DONE;	/* Not an HSR device */
 		hsr = netdev_priv(dev);
 		port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+		if (port == NULL) {
+			/* Resend of notification concerning removed device? */
+			return NOTIFY_DONE;
+		}
 	} else {
 		hsr = port->hsr;
 	}
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index a348dcb..7d37366 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -181,8 +181,10 @@
 	list_del_rcu(&port->port_list);
 
 	if (port != master) {
-		netdev_update_features(master->dev);
-		dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+		if (master != NULL) {
+			netdev_update_features(master->dev);
+			dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+		}
 		netdev_rx_handler_unregister(port->dev);
 		dev_set_promiscuity(port->dev, -1);
 	}
@@ -192,5 +194,7 @@
 	 */
 
 	synchronize_rcu();
-	dev_put(port->dev);
+
+	if (port != master)
+		dev_put(port->dev);
 }
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 14d02ea..3e44b9b 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -268,6 +268,7 @@
 		release_sock(sk);
 		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
 			timeo = schedule_timeout(timeo);
+		sched_annotate_sleep();
 		lock_sock(sk);
 		err = 0;
 		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 81751f1..592aff3 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -71,6 +71,20 @@
 	mutex_unlock(&inet_diag_table_mutex);
 }
 
+static size_t inet_sk_attr_size(void)
+{
+	return	  nla_total_size(sizeof(struct tcp_info))
+		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+		+ nla_total_size(1) /* INET_DIAG_TOS */
+		+ nla_total_size(1) /* INET_DIAG_TCLASS */
+		+ nla_total_size(sizeof(struct inet_diag_meminfo))
+		+ nla_total_size(sizeof(struct inet_diag_msg))
+		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
+		+ nla_total_size(TCP_CA_NAME_MAX)
+		+ nla_total_size(sizeof(struct tcpvegas_info))
+		+ 64;
+}
+
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
 			      struct user_namespace *user_ns,		      	
@@ -326,9 +340,7 @@
 	if (err)
 		goto out;
 
-	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-			sizeof(struct inet_diag_meminfo) +
-			sizeof(struct tcp_info) + 64, GFP_KERNEL);
+	rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
 	if (!rep) {
 		err = -ENOMEM;
 		goto out;
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 787b3c2..d9bc28a 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -67,6 +67,7 @@
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
+	skb_sender_cpu_clear(skb);
 	return dst_output(skb);
 }
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e5b6d0d..145a50c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -659,27 +659,30 @@
 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
 {
 	struct iphdr iph;
+	int netoff;
 	u32 len;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		return skb;
 
-	if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
+	netoff = skb_network_offset(skb);
+
+	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
 		return skb;
 
 	if (iph.ihl < 5 || iph.version != 4)
 		return skb;
 
 	len = ntohs(iph.tot_len);
-	if (skb->len < len || len < (iph.ihl * 4))
+	if (skb->len < netoff + len || len < (iph.ihl * 4))
 		return skb;
 
 	if (ip_is_fragment(&iph)) {
 		skb = skb_share_check(skb, GFP_ATOMIC);
 		if (skb) {
-			if (!pskb_may_pull(skb, iph.ihl*4))
+			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
 				return skb;
-			if (pskb_trim_rcsum(skb, len))
+			if (pskb_trim_rcsum(skb, netoff + len))
 				return skb;
 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 			if (ip_defrag(skb, user))
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d68199d..a7aea20 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -888,7 +888,8 @@
 	cork->length += length;
 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
+	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+	    (sk->sk_type == SOCK_DGRAM)) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 maxfraglen, flags);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 31d8c71..5cd9927 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -432,17 +432,32 @@
 		kfree_skb(skb);
 }
 
-static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk,
-					  const struct sk_buff *skb,
-					  int ee_origin)
+/* IPv4 supports cmsg on all icmp errors and some timestamps
+ *
+ * Timestamp code paths do not initialize the fields expected by cmsg:
+ * the PKTINFO fields in skb->cb[]. Fill those in here.
+ */
+static bool ipv4_datagram_support_cmsg(const struct sock *sk,
+				       struct sk_buff *skb,
+				       int ee_origin)
 {
-	struct in_pktinfo *info = PKTINFO_SKB_CB(skb);
+	struct in_pktinfo *info;
 
-	if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) ||
-	    (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
+	if (ee_origin == SO_EE_ORIGIN_ICMP)
+		return true;
+
+	if (ee_origin == SO_EE_ORIGIN_LOCAL)
+		return false;
+
+	/* Support IP_PKTINFO on tstamp packets if requested, to correlate
+	 * timestamp with egress dev. Not possible for packets without dev
+	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
+	 */
+	if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
 	    (!skb->dev))
 		return false;
 
+	info = PKTINFO_SKB_CB(skb);
 	info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
 	info->ipi_ifindex = skb->dev->ifindex;
 	return true;
@@ -483,7 +498,7 @@
 
 	serr = SKB_EXT_ERR(skb);
 
-	if (sin && skb->len) {
+	if (sin && serr->port) {
 		sin->sin_family = AF_INET;
 		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
 						   serr->addr_offset);
@@ -496,9 +511,7 @@
 	sin = &errhdr.offender;
 	memset(sin, 0, sizeof(*sin));
 
-	if (skb->len &&
-	    (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
-	     ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) {
+	if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
 		sin->sin_family = AF_INET;
 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
 		if (inet_sk(sk)->cmsg_flags)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index e9f66e1..208d543 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -259,6 +259,9 @@
 	kgid_t low, high;
 	int ret = 0;
 
+	if (sk->sk_family == AF_INET6)
+		sk->sk_ipv6only = 1;
+
 	inet_get_ping_group_range_net(net, &low, &high);
 	if (gid_lte(low, group) && gid_lte(group, high))
 		return 0;
@@ -305,6 +308,11 @@
 		if (addr_len < sizeof(*addr))
 			return -EINVAL;
 
+		if (addr->sin_family != AF_INET &&
+		    !(addr->sin_family == AF_UNSPEC &&
+		      addr->sin_addr.s_addr == htonl(INADDR_ANY)))
+			return -EAFNOSUPPORT;
+
 		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
 			 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
 
@@ -330,7 +338,7 @@
 			return -EINVAL;
 
 		if (addr->sin6_family != AF_INET6)
-			return -EINVAL;
+			return -EAFNOSUPPORT;
 
 		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
 			 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
@@ -716,7 +724,7 @@
 		if (msg->msg_namelen < sizeof(*usin))
 			return -EINVAL;
 		if (usin->sin_family != AF_INET)
-			return -EINVAL;
+			return -EAFNOSUPPORT;
 		daddr = usin->sin_addr.s_addr;
 		/* no remote port */
 	} else {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9d72a0f..995a225 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -835,17 +835,13 @@
 				       int large_allowed)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 new_size_goal, size_goal, hlen;
+	u32 new_size_goal, size_goal;
 
 	if (!large_allowed || !sk_can_gso(sk))
 		return mss_now;
 
-	/* Maybe we should/could use sk->sk_prot->max_header here ? */
-	hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
-	       inet_csk(sk)->icsk_ext_hdr_len +
-	       tp->tcp_header_len;
-
-	new_size_goal = sk->sk_gso_max_size - 1 - hlen;
+	/* Note : tcp_tso_autosize() will eventually split this later */
+	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
 	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
 
 	/* We try hard to avoid divides here */
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index d694088..62856e1 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -378,6 +378,12 @@
  */
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 {
+	/* If credits accumulated at a higher w, apply them gently now. */
+	if (tp->snd_cwnd_cnt >= w) {
+		tp->snd_cwnd_cnt = 0;
+		tp->snd_cwnd++;
+	}
+
 	tp->snd_cwnd_cnt += acked;
 	if (tp->snd_cwnd_cnt >= w) {
 		u32 delta = tp->snd_cwnd_cnt / w;
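
A standalone sketch of the additive-increase step after this change, with made-up starting values and the snd_cwnd_clamp handling omitted; credits that accumulated while w was much larger now add a single segment instead of a burst:

#include <stdio.h>

struct tp { unsigned int snd_cwnd, snd_cwnd_cnt; };

static void cong_avoid_ai(struct tp *tp, unsigned int w, unsigned int acked)
{
	if (tp->snd_cwnd_cnt >= w) {		/* stale credits: +1 only */
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		unsigned int delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
}

int main(void)
{
	/* 500 credits piled up while w was large; w has since dropped to 10.
	 * Without the leading check, cwnd would jump by ~50 in one call. */
	struct tp tp = { .snd_cwnd = 10, .snd_cwnd_cnt = 500 };

	cong_avoid_ai(&tp, 10, 1);
	printf("cwnd=%u cnt=%u\n", tp.snd_cwnd, tp.snd_cwnd_cnt);	/* cwnd=11 cnt=1 */
	return 0;
}
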
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 4b276d1..06d3d66 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -306,8 +306,10 @@
 		}
 	}
 
-	if (ca->cnt == 0)			/* cannot be zero */
-		ca->cnt = 1;
+	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
+	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
+	 */
+	ca->cnt = max(ca->cnt, 2U);
 }
 
 static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
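
A back-of-the-envelope check of the 1.5x-per-RTT figure in the comment above (made-up cwnd, assuming roughly one ACK per delivered segment):

#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 100;
	unsigned int cnt = 2;			/* ca->cnt clamped to at least 2 */
	unsigned int acks_per_rtt = cwnd;	/* ~one ACK per in-flight segment */

	cwnd += acks_per_rtt / cnt;		/* one increment per cnt ACKs */
	printf("cwnd after one RTT: %u (~1.5x of 100)\n", cwnd);
	return 0;
}
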
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8fdd27b..fb4cf8b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4770,7 +4770,7 @@
 		return false;
 
 	/* If we filled the congestion window, do not expand.  */
-	if (tp->packets_out >= tp->snd_cwnd)
+	if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
 		return false;
 
 	return true;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index d5f6bd9..dab7381 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -63,6 +63,7 @@
 		return err;
 
 	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
+	skb->protocol = htons(ETH_P_IP);
 
 	return x->outer_mode->output2(x, skb);
 }
@@ -71,7 +72,6 @@
 int xfrm4_output_finish(struct sk_buff *skb)
 {
 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-	skb->protocol = htons(ETH_P_IP);
 
 #ifdef CONFIG_NETFILTER
 	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 98e4a63..b603002 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4903,6 +4903,21 @@
 	return ret;
 }
 
+static
+int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
+			void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct inet6_dev *idev = ctl->extra1;
+	int min_mtu = IPV6_MIN_MTU;
+	struct ctl_table lctl;
+
+	lctl = *ctl;
+	lctl.extra1 = &min_mtu;
+	lctl.extra2 = idev ? &idev->dev->mtu : NULL;
+
+	return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
+}
+
 static void dev_disable_change(struct inet6_dev *idev)
 {
 	struct netdev_notifier_info info;
@@ -5054,7 +5069,7 @@
 			.data		= &ipv6_devconf.mtu6,
 			.maxlen		= sizeof(int),
 			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
+			.proc_handler	= addrconf_sysctl_mtu,
 		},
 		{
 			.procname	= "accept_ra",
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index c215be7..ace8dac 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -325,14 +325,34 @@
 	kfree_skb(skb);
 }
 
-static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb)
+/* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
+ *
+ * At one point, excluding local errors was a quick test to identify icmp/icmp6
+ * errors. This is no longer true, but the test remained, so the v6 stack,
+ * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
+ *
+ * Timestamp code paths do not initialize the fields expected by cmsg:
+ * the PKTINFO fields in skb->cb[]. Fill those in here.
+ */
+static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
+				      struct sock_exterr_skb *serr)
 {
-	int ifindex = skb->dev ? skb->dev->ifindex : -1;
+	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+	    serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6)
+		return true;
+
+	if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
+		return false;
+
+	if (!skb->dev)
+		return false;
 
 	if (skb->protocol == htons(ETH_P_IPV6))
-		IP6CB(skb)->iif = ifindex;
+		IP6CB(skb)->iif = skb->dev->ifindex;
 	else
-		PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex;
+		PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
+
+	return true;
 }
 
 /*
@@ -369,7 +389,7 @@
 
 	serr = SKB_EXT_ERR(skb);
 
-	if (sin && skb->len) {
+	if (sin && serr->port) {
 		const unsigned char *nh = skb_network_header(skb);
 		sin->sin6_family = AF_INET6;
 		sin->sin6_flowinfo = 0;
@@ -394,14 +414,11 @@
 	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
 	sin = &errhdr.offender;
 	memset(sin, 0, sizeof(*sin));
-	if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) {
+
+	if (ip6_datagram_support_cmsg(skb, serr)) {
 		sin->sin6_family = AF_INET6;
-		if (np->rxopt.all) {
-			if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
-			    serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
-				ip6_datagram_prepare_pktinfo_errqueue(skb);
+		if (np->rxopt.all)
 			ip6_datagram_recv_common_ctl(sk, msg, skb);
-		}
 		if (skb->protocol == htons(ETH_P_IPV6)) {
 			sin->sin6_addr = ipv6_hdr(skb)->saddr;
 			if (np->rxopt.all)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 7deebf1..7e80b61 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -318,6 +318,7 @@
 
 static inline int ip6_forward_finish(struct sk_buff *skb)
 {
+	skb_sender_cpu_clear(skb);
 	return dst_output(skb);
 }
 
@@ -1298,7 +1299,8 @@
 	if (((length > mtu) ||
 	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO)) {
+	    (rt->dst.dev->features & NETIF_F_UFO) &&
+	    (sk->sk_type == SOCK_DGRAM)) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen,
 					  transhdrlen, mtu, flags, rt);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 266a264..ddd94ec 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -314,7 +314,7 @@
  *   Create tunnel matching given parameters.
  *
  * Return:
- *   created tunnel or NULL
+ *   created tunnel or error pointer
  **/
 
 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
@@ -322,7 +322,7 @@
 	struct net_device *dev;
 	struct ip6_tnl *t;
 	char name[IFNAMSIZ];
-	int err;
+	int err = -ENOMEM;
 
 	if (p->name[0])
 		strlcpy(name, p->name, IFNAMSIZ);
@@ -348,7 +348,7 @@
 failed_free:
 	ip6_dev_free(dev);
 failed:
-	return NULL;
+	return ERR_PTR(err);
 }
 
 /**
@@ -362,7 +362,7 @@
  *   tunnel device is created and registered for use.
  *
  * Return:
- *   matching tunnel or NULL
+ *   matching tunnel or error pointer
  **/
 
 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
@@ -380,13 +380,13 @@
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
 		    ipv6_addr_equal(remote, &t->parms.raddr)) {
 			if (create)
-				return NULL;
+				return ERR_PTR(-EEXIST);
 
 			return t;
 		}
 	}
 	if (!create)
-		return NULL;
+		return ERR_PTR(-ENODEV);
 	return ip6_tnl_create(net, p);
 }
 
@@ -1420,7 +1420,7 @@
 			}
 			ip6_tnl_parm_from_user(&p1, &p);
 			t = ip6_tnl_locate(net, &p1, 0);
-			if (t == NULL)
+			if (IS_ERR(t))
 				t = netdev_priv(dev);
 		} else {
 			memset(&p, 0, sizeof(p));
@@ -1445,7 +1445,7 @@
 		ip6_tnl_parm_from_user(&p1, &p);
 		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
 		if (cmd == SIOCCHGTUNNEL) {
-			if (t != NULL) {
+			if (!IS_ERR(t)) {
 				if (t->dev != dev) {
 					err = -EEXIST;
 					break;
@@ -1457,14 +1457,15 @@
 			else
 				err = ip6_tnl_update(t, &p1);
 		}
-		if (t) {
+		if (!IS_ERR(t)) {
 			err = 0;
 			ip6_tnl_parm_to_user(&p, &t->parms);
 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
 				err = -EFAULT;
 
-		} else
-			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+		} else {
+			err = PTR_ERR(t);
+		}
 		break;
 	case SIOCDELTUNNEL:
 		err = -EPERM;
@@ -1478,7 +1479,7 @@
 			err = -ENOENT;
 			ip6_tnl_parm_from_user(&p1, &p);
 			t = ip6_tnl_locate(net, &p1, 0);
-			if (t == NULL)
+			if (IS_ERR(t))
 				break;
 			err = -EPERM;
 			if (t->dev == ip6n->fb_tnl_dev)
@@ -1672,12 +1673,13 @@
 			   struct nlattr *tb[], struct nlattr *data[])
 {
 	struct net *net = dev_net(dev);
-	struct ip6_tnl *nt;
+	struct ip6_tnl *nt, *t;
 
 	nt = netdev_priv(dev);
 	ip6_tnl_netlink_parms(data, &nt->parms);
 
-	if (ip6_tnl_locate(net, &nt->parms, 0))
+	t = ip6_tnl_locate(net, &nt->parms, 0);
+	if (!IS_ERR(t))
 		return -EEXIST;
 
 	return ip6_tnl_create2(dev);
@@ -1697,8 +1699,7 @@
 	ip6_tnl_netlink_parms(data, &p);
 
 	t = ip6_tnl_locate(net, &p, 0);
-
-	if (t) {
+	if (!IS_ERR(t)) {
 		if (t->dev != dev)
 			return -EEXIST;
 	} else
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index bd46f73..a2dfff6 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -102,9 +102,10 @@
 
 	if (msg->msg_name) {
 		DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
-		if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
-		    u->sin6_family != AF_INET6) {
+		if (msg->msg_namelen < sizeof(*u))
 			return -EINVAL;
+		if (u->sin6_family != AF_INET6) {
+			return -EAFNOSUPPORT;
 		}
 		if (sk->sk_bound_dev_if &&
 		    sk->sk_bound_dev_if != u->sin6_scope_id) {
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index ca3f29b..010f8bd 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -114,6 +114,7 @@
 		return err;
 
 	skb->ignore_df = 1;
+	skb->protocol = htons(ETH_P_IPV6);
 
 	return x->outer_mode->output2(x, skb);
 }
@@ -122,7 +123,6 @@
 int xfrm6_output_finish(struct sk_buff *skb)
 {
 	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-	skb->protocol = htons(ETH_P_IPV6);
 
 #ifdef CONFIG_NETFILTER
 	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 48bf5a0..8d2d01b 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -200,6 +200,7 @@
 
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
 		case IPPROTO_MH:
+			offset += ipv6_optlen(exthdr);
 			if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
 				struct ip6_mh *mh;
 
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 40695b9..683346d 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -798,7 +798,9 @@
 	orig_jiffies = jiffies;
 
 	/* Set poll time to 200 ms */
-	poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200));
+	poll_time = msecs_to_jiffies(200);
+	if (timeout)
+		poll_time = min_t(unsigned long, timeout, poll_time);
 
 	spin_lock_irqsave(&self->spinlock, flags);
 	while (self->tx_skb && self->tx_skb->len) {
@@ -811,7 +813,7 @@
 			break;
 	}
 	spin_unlock_irqrestore(&self->spinlock, flags);
-	current->state = TASK_RUNNING;
+	__set_current_state(TASK_RUNNING);
 }
 
 /*
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 3c83a1e..1215693 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -305,7 +305,7 @@
 
   /* Put ourselves on the wait queue to be woken up */
   add_wait_queue(&irnet_events.rwait, &wait);
-  current->state = TASK_INTERRUPTIBLE;
+  set_current_state(TASK_INTERRUPTIBLE);
   for(;;)
     {
       /* If there is unread events */
@@ -321,7 +321,7 @@
       /* Yield and wait to be woken up */
       schedule();
     }
-  current->state = TASK_RUNNING;
+  __set_current_state(TASK_RUNNING);
   remove_wait_queue(&irnet_events.rwait, &wait);
 
   /* Did we got it ? */
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index ff0d2db..5bcd4e5 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1508,6 +1508,8 @@
 	if (ieee80211_chanctx_refcount(local, ctx) == 0)
 		ieee80211_free_chanctx(local, ctx);
 
+	sdata->radar_required = false;
+
 	/* Unreserving may ready an in-place reservation. */
 	if (use_reserved_switch)
 		ieee80211_vif_use_reserved_switch(local);
@@ -1566,6 +1568,9 @@
 	ieee80211_recalc_smps_chanctx(local, ctx);
 	ieee80211_recalc_radar_chanctx(local, ctx);
  out:
+	if (ret)
+		sdata->radar_required = false;
+
 	mutex_unlock(&local->chanctx_mtx);
 	return ret;
 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3afe368..8d53d65 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -58,13 +58,24 @@
 #define IEEE80211_UNSET_POWER_LEVEL	INT_MIN
 
 /*
- * Some APs experience problems when working with U-APSD. Decrease the
- * probability of that happening by using legacy mode for all ACs but VO.
- * The AP that caused us trouble was a Cisco 4410N. It ignores our
- * setting, and always treats non-VO ACs as legacy.
+ * Some APs experience problems when working with U-APSD. Decreasing the
+ * probability of that happening by using legacy mode for all ACs but VO isn't
+ * enough.
+ *
+ * Cisco 4410N originally forced us to enable VO by default only because it
+ * treated non-VO ACs as legacy.
+ *
+ * However some APs (notably Netgear R7000) silently reclassify packets to
+ * different ACs. Since u-APSD ACs require trigger frames for frame retrieval
+ * clients would never see some frames (e.g. ARP responses) or would fetch them
+ * accidentally after a long time.
+ *
+ * It makes little sense to enable u-APSD queues by default because it needs
+ * userspace applications to be aware of it to actually take advantage of the
+ * possible additional powersavings. Implicitly depending on driver autotrigger
+ * frame support doesn't make much sense.
  */
-#define IEEE80211_DEFAULT_UAPSD_QUEUES \
-	IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
+#define IEEE80211_DEFAULT_UAPSD_QUEUES 0
 
 #define IEEE80211_DEFAULT_MAX_SP_LEN		\
 	IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -453,6 +464,7 @@
 	unsigned int flags;
 
 	bool csa_waiting_bcn;
+	bool csa_ignored_same_chan;
 
 	bool beacon_crc_valid;
 	u32 beacon_crc;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 10ac632..142f66a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1150,6 +1150,17 @@
 		return;
 	}
 
+	if (cfg80211_chandef_identical(&csa_ie.chandef,
+				       &sdata->vif.bss_conf.chandef)) {
+		if (ifmgd->csa_ignored_same_chan)
+			return;
+		sdata_info(sdata,
+			   "AP %pM tries to chanswitch to same channel, ignore\n",
+			   ifmgd->associated->bssid);
+		ifmgd->csa_ignored_same_chan = true;
+		return;
+	}
+
 	mutex_lock(&local->mtx);
 	mutex_lock(&local->chanctx_mtx);
 	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
@@ -1210,6 +1221,7 @@
 	sdata->vif.csa_active = true;
 	sdata->csa_chandef = csa_ie.chandef;
 	sdata->csa_block_tx = csa_ie.mode;
+	ifmgd->csa_ignored_same_chan = false;
 
 	if (sdata->csa_block_tx)
 		ieee80211_stop_vif_queues(local, sdata,
@@ -2090,6 +2102,7 @@
 
 	sdata->vif.csa_active = false;
 	ifmgd->csa_waiting_bcn = false;
+	ifmgd->csa_ignored_same_chan = false;
 	if (sdata->csa_block_tx) {
 		ieee80211_wake_vif_queues(local, sdata,
 					  IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -3204,7 +3217,8 @@
 	(1ULL << WLAN_EID_CHANNEL_SWITCH) |
 	(1ULL << WLAN_EID_PWR_CONSTRAINT) |
 	(1ULL << WLAN_EID_HT_CAPABILITY) |
-	(1ULL << WLAN_EID_HT_OPERATION);
+	(1ULL << WLAN_EID_HT_OPERATION) |
+	(1ULL << WLAN_EID_EXT_CHANSWITCH_ANN);
 
 static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_mgmt *mgmt, size_t len,
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 7c86a00..ef6e8a6 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -373,7 +373,7 @@
 		rate++;
 		mi->sample_deferred++;
 	} else {
-		if (!msr->sample_limit != 0)
+		if (!msr->sample_limit)
 			return;
 
 		mi->sample_packets++;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1101563..944bdc0 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2214,6 +2214,9 @@
 	hdr = (struct ieee80211_hdr *) skb->data;
 	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
 
+	if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
+		return RX_DROP_MONITOR;
+
 	/* frame is in RMC, don't forward */
 	if (ieee80211_is_data(hdr->frame_control) &&
 	    is_multicast_ether_addr(hdr->addr1) &&
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 88a18ff..07bd8db 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -566,6 +566,7 @@
 		if (tx->sdata->control_port_no_encrypt)
 			info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
 		info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+		info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
 	}
 
 	return TX_CONTINUE;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 8428f4a..747bdcf 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3178,7 +3178,7 @@
 		wdev_iter = &sdata_iter->wdev;
 
 		if (sdata_iter == sdata ||
-		    rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL ||
+		    !ieee80211_sdata_running(sdata_iter) ||
 		    local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
 			continue;
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e557590..ed99448 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3402,7 +3402,7 @@
 		if (udest.af == 0)
 			udest.af = svc->af;
 
-		if (udest.af != svc->af) {
+		if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) {
 			/* The synchronization protocol is incompatible
 			 * with mixed family services
 			 */
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index c47ffd7..d93ceeb 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -896,6 +896,8 @@
 			IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
 			return;
 		}
+		if (!(flags & IP_VS_CONN_F_TEMPLATE))
+			kfree(param->pe_data);
 	}
 
 	if (opt)
@@ -1169,6 +1171,7 @@
 				(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
 				);
 #endif
+	ip_vs_pe_put(param.pe);
 	return 0;
 	/* Error exit */
 out:
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 199fd0f..6ab7779 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -227,7 +227,7 @@
 
 static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
 {
-	rule->genmask = 0;
+	rule->genmask &= ~(1 << gencursor_next(net));
 }
 
 static int
@@ -1711,9 +1711,12 @@
 	}
 	nla_nest_end(skb, list);
 
-	if (rule->ulen &&
-	    nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule)))
-		goto nla_put_failure;
+	if (rule->udata) {
+		struct nft_userdata *udata = nft_userdata(rule);
+		if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1,
+			    udata->data) < 0)
+			goto nla_put_failure;
+	}
 
 	nlmsg_end(skb, nlh);
 	return 0;
@@ -1896,11 +1899,12 @@
 	struct nft_table *table;
 	struct nft_chain *chain;
 	struct nft_rule *rule, *old_rule = NULL;
+	struct nft_userdata *udata;
 	struct nft_trans *trans = NULL;
 	struct nft_expr *expr;
 	struct nft_ctx ctx;
 	struct nlattr *tmp;
-	unsigned int size, i, n, ulen = 0;
+	unsigned int size, i, n, ulen = 0, usize = 0;
 	int err, rem;
 	bool create;
 	u64 handle, pos_handle;
@@ -1968,12 +1972,19 @@
 			n++;
 		}
 	}
+	/* Check for overflow of dlen field */
+	err = -EFBIG;
+	if (size >= 1 << 12)
+		goto err1;
 
-	if (nla[NFTA_RULE_USERDATA])
+	if (nla[NFTA_RULE_USERDATA]) {
 		ulen = nla_len(nla[NFTA_RULE_USERDATA]);
+		if (ulen > 0)
+			usize = sizeof(struct nft_userdata) + ulen;
+	}
 
 	err = -ENOMEM;
-	rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL);
+	rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL);
 	if (rule == NULL)
 		goto err1;
 
@@ -1981,10 +1992,13 @@
 
 	rule->handle = handle;
 	rule->dlen   = size;
-	rule->ulen   = ulen;
+	rule->udata  = ulen ? 1 : 0;
 
-	if (ulen)
-		nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen);
+	if (ulen) {
+		udata = nft_userdata(rule);
+		udata->len = ulen - 1;
+		nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen);
+	}
 
 	expr = nft_expr_first(rule);
 	for (i = 0; i < n; i++) {
@@ -2031,12 +2045,6 @@
 
 err3:
 	list_del_rcu(&rule->list);
-	if (trans) {
-		list_del_rcu(&nft_trans_rule(trans)->list);
-		nft_rule_clear(net, nft_trans_rule(trans));
-		nft_trans_destroy(trans);
-		chain->use++;
-	}
 err2:
 	nf_tables_rule_destroy(&ctx, rule);
 err1:
@@ -3612,12 +3620,11 @@
 						 &te->elem,
 						 NFT_MSG_DELSETELEM, 0);
 			te->set->ops->get(te->set, &te->elem);
-			te->set->ops->remove(te->set, &te->elem);
 			nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
-			if (te->elem.flags & NFT_SET_MAP) {
-				nft_data_uninit(&te->elem.data,
-						te->set->dtype);
-			}
+			if (te->set->flags & NFT_SET_MAP &&
+			    !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
+				nft_data_uninit(&te->elem.data, te->set->dtype);
+			te->set->ops->remove(te->set, &te->elem);
 			nft_trans_destroy(trans);
 			break;
 		}
@@ -3658,7 +3665,7 @@
 {
 	struct net *net = sock_net(skb->sk);
 	struct nft_trans *trans, *next;
-	struct nft_set *set;
+	struct nft_trans_elem *te;
 
 	list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
 		switch (trans->msg_type) {
@@ -3719,9 +3726,13 @@
 			break;
 		case NFT_MSG_NEWSETELEM:
 			nft_trans_elem_set(trans)->nelems--;
-			set = nft_trans_elem_set(trans);
-			set->ops->get(set, &nft_trans_elem(trans));
-			set->ops->remove(set, &nft_trans_elem(trans));
+			te = (struct nft_trans_elem *)trans->data;
+			te->set->ops->get(te->set, &te->elem);
+			nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
+			if (te->set->flags & NFT_SET_MAP &&
+			    !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
+				nft_data_uninit(&te->elem.data, te->set->dtype);
+			te->set->ops->remove(te->set, &te->elem);
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_DELSETELEM:
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index c598f74..213584c 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -123,7 +123,7 @@
 nft_target_set_tgchk_param(struct xt_tgchk_param *par,
 			   const struct nft_ctx *ctx,
 			   struct xt_target *target, void *info,
-			   union nft_entry *entry, u8 proto, bool inv)
+			   union nft_entry *entry, u16 proto, bool inv)
 {
 	par->net	= ctx->net;
 	par->table	= ctx->table->name;
@@ -137,7 +137,7 @@
 		entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
 		break;
 	case NFPROTO_BRIDGE:
-		entry->ebt.ethproto = proto;
+		entry->ebt.ethproto = (__force __be16)proto;
 		entry->ebt.invflags = inv ? EBT_IPROTO : 0;
 		break;
 	}
@@ -171,7 +171,7 @@
 	[NFTA_RULE_COMPAT_FLAGS]	= { .type = NLA_U32 },
 };
 
-static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv)
+static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
 {
 	struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
 	u32 flags;
@@ -203,7 +203,7 @@
 	struct xt_target *target = expr->ops->data;
 	struct xt_tgchk_param par;
 	size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
-	u8 proto = 0;
+	u16 proto = 0;
 	bool inv = false;
 	union nft_entry e = {};
 	int ret;
@@ -334,7 +334,7 @@
 static void
 nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
 			  struct xt_match *match, void *info,
-			  union nft_entry *entry, u8 proto, bool inv)
+			  union nft_entry *entry, u16 proto, bool inv)
 {
 	par->net	= ctx->net;
 	par->table	= ctx->table->name;
@@ -348,7 +348,7 @@
 		entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
 		break;
 	case NFPROTO_BRIDGE:
-		entry->ebt.ethproto = proto;
+		entry->ebt.ethproto = (__force __be16)proto;
 		entry->ebt.invflags = inv ? EBT_IPROTO : 0;
 		break;
 	}
@@ -385,7 +385,7 @@
 	struct xt_match *match = expr->ops->data;
 	struct xt_mtchk_param par;
 	size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
-	u8 proto = 0;
+	u16 proto = 0;
 	bool inv = false;
 	union nft_entry e = {};
 	int ret;
@@ -625,8 +625,12 @@
 		struct xt_match *match = nft_match->ops.data;
 
 		if (strcmp(match->name, mt_name) == 0 &&
-		    match->revision == rev && match->family == family)
+		    match->revision == rev && match->family == family) {
+			if (!try_module_get(match->me))
+				return ERR_PTR(-ENOENT);
+
 			return &nft_match->ops;
+		}
 	}
 
 	match = xt_request_find_match(family, mt_name, rev);
@@ -695,8 +699,12 @@
 		struct xt_target *target = nft_target->ops.data;
 
 		if (strcmp(target->name, tg_name) == 0 &&
-		    target->revision == rev && target->family == family)
+		    target->revision == rev && target->family == family) {
+			if (!try_module_get(target->me))
+				return ERR_PTR(-ENOENT);
+
 			return &nft_target->ops;
+		}
 	}
 
 	target = xt_request_find_target(family, tg_name, rev);
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 61e6c40..c82df0a 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -192,8 +192,6 @@
 		.key_offset = offsetof(struct nft_hash_elem, key),
 		.key_len = set->klen,
 		.hashfn = jhash,
-		.grow_decision = rht_grow_above_75,
-		.shrink_decision = rht_shrink_below_30,
 	};
 
 	return rhashtable_init(priv, &params);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 30dbe34..45e1b30 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -378,12 +378,11 @@
 	mutex_lock(&recent_mutex);
 	t = recent_table_lookup(recent_net, info->name);
 	if (t != NULL) {
-		if (info->hit_count > t->nstamps_max_mask) {
-			pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n",
-				info->hit_count, t->nstamps_max_mask + 1,
-				info->name);
-			ret = -EINVAL;
-			goto out;
+		if (nstamp_mask > t->nstamps_max_mask) {
+			spin_lock_bh(&recent_lock);
+			recent_table_flush(t);
+			t->nstamps_max_mask = nstamp_mask;
+			spin_unlock_bh(&recent_lock);
 		}
 
 		t->refcnt++;
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 1ba6793..13332dbf 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -243,12 +243,13 @@
 extract_icmp6_fields(const struct sk_buff *skb,
 		     unsigned int outside_hdrlen,
 		     int *protocol,
-		     struct in6_addr **raddr,
-		     struct in6_addr **laddr,
+		     const struct in6_addr **raddr,
+		     const struct in6_addr **laddr,
 		     __be16 *rport,
-		     __be16 *lport)
+		     __be16 *lport,
+		     struct ipv6hdr *ipv6_var)
 {
-	struct ipv6hdr *inside_iph, _inside_iph;
+	const struct ipv6hdr *inside_iph;
 	struct icmp6hdr *icmph, _icmph;
 	__be16 *ports, _ports[2];
 	u8 inside_nexthdr;
@@ -263,12 +264,14 @@
 	if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK)
 		return 1;
 
-	inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph);
+	inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph),
+					sizeof(*ipv6_var), ipv6_var);
 	if (inside_iph == NULL)
 		return 1;
 	inside_nexthdr = inside_iph->nexthdr;
 
-	inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph),
+	inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) +
+					      sizeof(*ipv6_var),
 					 &inside_nexthdr, &inside_fragoff);
 	if (inside_hdrlen < 0)
 		return 1; /* hjm: Packet has no/incomplete transport layer headers. */
@@ -315,10 +318,10 @@
 static bool
 socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
 {
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb);
 	struct udphdr _hdr, *hp = NULL;
 	struct sock *sk = skb->sk;
-	struct in6_addr *daddr = NULL, *saddr = NULL;
+	const struct in6_addr *daddr = NULL, *saddr = NULL;
 	__be16 uninitialized_var(dport), uninitialized_var(sport);
 	int thoff = 0, uninitialized_var(tproto);
 	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
@@ -342,7 +345,7 @@
 
 	} else if (tproto == IPPROTO_ICMPV6) {
 		if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
-					 &sport, &dport))
+					 &sport, &dport, &ipv6_var))
 			return false;
 	} else {
 		return false;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2702673..05919bf 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -3126,8 +3126,6 @@
 		.key_len = sizeof(u32), /* portid */
 		.hashfn = jhash,
 		.max_shift = 16, /* 64K */
-		.grow_decision = rht_grow_above_75,
-		.shrink_decision = rht_shrink_below_30,
 	};
 
 	if (err != 0)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ae5e77c..5bae724 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2194,14 +2194,55 @@
 	return 0;
 }
 
-static void __net_exit ovs_exit_net(struct net *net)
+static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
+					    struct list_head *head)
+{
+	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+	struct datapath *dp;
+
+	list_for_each_entry(dp, &ovs_net->dps, list_node) {
+		int i;
+
+		for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
+			struct vport *vport;
+
+			hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
+				struct netdev_vport *netdev_vport;
+
+				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
+					continue;
+
+				netdev_vport = netdev_vport_priv(vport);
+				if (dev_net(netdev_vport->dev) == dnet)
+					list_add(&vport->detach_list, head);
+			}
+		}
+	}
+}
+
+static void __net_exit ovs_exit_net(struct net *dnet)
 {
 	struct datapath *dp, *dp_next;
-	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+	struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
+	struct vport *vport, *vport_next;
+	struct net *net;
+	LIST_HEAD(head);
 
 	ovs_lock();
 	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
 		__dp_destroy(dp);
+
+	rtnl_lock();
+	for_each_net(net)
+		list_vports_from_net(net, dnet, &head);
+	rtnl_unlock();
+
+	/* Detach all vports from given namespace. */
+	list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
+		list_del(&vport->detach_list);
+		ovs_dp_detach_port(vport);
+	}
+
 	ovs_unlock();
 
 	cancel_work_sync(&ovs_net->dp_notify_work);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 216f20b..22b18c1 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2253,14 +2253,20 @@
 						struct sk_buff *skb)
 {
 	const struct nlattr *ovs_key = nla_data(a);
+	struct nlattr *nla;
 	size_t key_len = nla_len(ovs_key) / 2;
 
 	/* Revert the conversion we did from a non-masked set action to
 	 * masked set action.
 	 */
-	if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key))
+	nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
+	if (!nla)
 		return -EMSGSIZE;
 
+	if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
+		return -EMSGSIZE;
+
+	nla_nest_end(skb, nla);
 	return 0;
 }
 
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index f8ae295..bc85331 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -103,6 +103,7 @@
  * @ops: Class structure.
  * @percpu_stats: Points to per-CPU statistics used and maintained by vport
  * @err_stats: Points to error statistics used and maintained by vport
+ * @detach_list: list used for detaching vports during net exit.
  */
 struct vport {
 	struct rcu_head rcu;
@@ -117,6 +118,7 @@
 	struct pcpu_sw_netstats __percpu *percpu_stats;
 
 	struct vport_err_stats err_stats;
+	struct list_head detach_list;
 };
 
 /**
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9c28cec..f8db706 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -698,6 +698,10 @@
 
 	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 		if (!frozen) {
+			if (!BLOCK_NUM_PKTS(pbd)) {
+				/* An empty block. Just refresh the timer. */
+				goto refresh_timer;
+			}
 			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 			if (!prb_dispatch_next_block(pkc, po))
 				goto refresh_timer;
@@ -798,7 +802,11 @@
 		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 	} else {
-		/* Ok, we tmo'd - so get the current time */
+		/* Ok, we tmo'd - so get the current time.
+		 *
+		 * It shouldn't really happen as we don't close empty
+		 * blocks. See prb_retire_rx_blk_timer_expired().
+		 */
 		struct timespec ts;
 		getnstimeofday(&ts);
 		h1->ts_last_pkt.ts_sec = ts.tv_sec;
@@ -1349,14 +1357,14 @@
 		return 0;
 	}
 
+	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
+		skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
+		if (!skb)
+			return 0;
+	}
 	switch (f->type) {
 	case PACKET_FANOUT_HASH:
 	default:
-		if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
-			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
-			if (!skb)
-				return 0;
-		}
 		idx = fanout_demux_hash(f, skb, num);
 		break;
 	case PACKET_FANOUT_LB:
@@ -3115,11 +3123,18 @@
 	return 0;
 }
 
-static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
+static void packet_dev_mclist_delete(struct net_device *dev,
+				     struct packet_mclist **mlp)
 {
-	for ( ; i; i = i->next) {
-		if (i->ifindex == dev->ifindex)
-			packet_dev_mc(dev, i, what);
+	struct packet_mclist *ml;
+
+	while ((ml = *mlp) != NULL) {
+		if (ml->ifindex == dev->ifindex) {
+			packet_dev_mc(dev, ml, -1);
+			*mlp = ml->next;
+			kfree(ml);
+		} else
+			mlp = &ml->next;
 	}
 }
 
@@ -3196,12 +3211,11 @@
 					packet_dev_mc(dev, ml, -1);
 				kfree(ml);
 			}
-			rtnl_unlock();
-			return 0;
+			break;
 		}
 	}
 	rtnl_unlock();
-	return -EADDRNOTAVAIL;
+	return 0;
 }
 
 static void packet_flush_mclist(struct sock *sk)
@@ -3551,7 +3565,7 @@
 		switch (msg) {
 		case NETDEV_UNREGISTER:
 			if (po->mclist)
-				packet_dev_mclist(dev, po->mclist, -1);
+				packet_dev_mclist_delete(dev, &po->mclist);
 			/* fallthrough */
 
 		case NETDEV_DOWN:
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index a817705..dba8d08 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -88,7 +88,9 @@
 			int *unpinned);
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 
-static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
+static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
+			     struct rds_iw_device **rds_iwdev,
+			     struct rdma_cm_id **cm_id)
 {
 	struct rds_iw_device *iwdev;
 	struct rds_iw_cm_id *i_cm_id;
@@ -112,15 +114,15 @@
 				src_addr->sin_port,
 				dst_addr->sin_addr.s_addr,
 				dst_addr->sin_port,
-				rs->rs_bound_addr,
-				rs->rs_bound_port,
-				rs->rs_conn_addr,
-				rs->rs_conn_port);
+				src->sin_addr.s_addr,
+				src->sin_port,
+				dst->sin_addr.s_addr,
+				dst->sin_port);
 #ifdef WORKING_TUPLE_DETECTION
-			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
-			    src_addr->sin_port == rs->rs_bound_port &&
-			    dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
-			    dst_addr->sin_port == rs->rs_conn_port) {
+			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
+			    src_addr->sin_port == src->sin_port &&
+			    dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
+			    dst_addr->sin_port == dst->sin_port) {
 #else
 			/* FIXME - needs to compare the local and remote
 			 * ipaddr/port tuple, but the ipaddr is the only
@@ -128,7 +130,7 @@
 			 * zero'ed.  It doesn't appear to be properly populated
 			 * during connection setup...
 			 */
-			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
+			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
 #endif
 				spin_unlock_irq(&iwdev->spinlock);
 				*rds_iwdev = iwdev;
@@ -180,19 +182,13 @@
 {
 	struct sockaddr_in *src_addr, *dst_addr;
 	struct rds_iw_device *rds_iwdev_old;
-	struct rds_sock rs;
 	struct rdma_cm_id *pcm_id;
 	int rc;
 
 	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
 	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
 
-	rs.rs_bound_addr = src_addr->sin_addr.s_addr;
-	rs.rs_bound_port = src_addr->sin_port;
-	rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
-	rs.rs_conn_port = dst_addr->sin_port;
-
-	rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
+	rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
 	if (rc)
 		rds_iw_remove_cm_id(rds_iwdev, cm_id);
 
@@ -598,9 +594,17 @@
 	struct rds_iw_device *rds_iwdev;
 	struct rds_iw_mr *ibmr = NULL;
 	struct rdma_cm_id *cm_id;
+	struct sockaddr_in src = {
+		.sin_addr.s_addr = rs->rs_bound_addr,
+		.sin_port = rs->rs_bound_port,
+	};
+	struct sockaddr_in dst = {
+		.sin_addr.s_addr = rs->rs_conn_addr,
+		.sin_port = rs->rs_conn_port,
+	};
 	int ret;
 
-	ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
+	ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
 	if (ret || !cm_id) {
 		ret = -ENODEV;
 		goto out;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index c6be17a..e0547f5 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -218,7 +218,8 @@
 	struct rxrpc_header *hdr;
 	struct sk_buff *txb;
 	unsigned long *p_txb, resend_at;
-	int loop, stop;
+	bool stop;
+	int loop;
 	u8 resend;
 
 	_enter("{%d,%d,%d,%d},",
@@ -226,7 +227,7 @@
 	       atomic_read(&call->sequence),
 	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
 
-	stop = 0;
+	stop = false;
 	resend = 0;
 	resend_at = 0;
 
@@ -255,11 +256,11 @@
 			_proto("Tx DATA %%%u { #%d }",
 			       ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
 			if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
-				stop = 0;
+				stop = true;
 				sp->resend_at = jiffies + 3;
 			} else {
 				sp->resend_at =
-					jiffies + rxrpc_resend_timeout * HZ;
+					jiffies + rxrpc_resend_timeout;
 			}
 		}
 
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 5394b6b..0610efa 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -42,7 +42,8 @@
 		_leave("UDP socket errqueue empty");
 		return;
 	}
-	if (!skb->len) {
+	serr = SKB_EXT_ERR(skb);
+	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
 		_leave("UDP empty message");
 		kfree_skb(skb);
 		return;
@@ -50,7 +51,6 @@
 
 	rxrpc_new_skb(skb);
 
-	serr = SKB_EXT_ERR(skb);
 	addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
 	port = serr->port;
 
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 4575485..19a5606 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -87,7 +87,7 @@
 		if (!skb) {
 			/* nothing remains on the queue */
 			if (copied &&
-			    (msg->msg_flags & MSG_PEEK || timeo == 0))
+			    (flags & MSG_PEEK || timeo == 0))
 				goto out;
 
 			/* wait for a message to turn up */
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 82c5d7f..5f6288f 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -25,21 +25,41 @@
 		   struct tcf_result *res)
 {
 	struct tcf_bpf *b = a->priv;
-	int action;
-	int filter_res;
+	int action, filter_res;
 
 	spin_lock(&b->tcf_lock);
+
 	b->tcf_tm.lastuse = jiffies;
 	bstats_update(&b->tcf_bstats, skb);
-	action = b->tcf_action;
 
 	filter_res = BPF_PROG_RUN(b->filter, skb);
-	if (filter_res == 0) {
-		/* Return code 0 from the BPF program
-		 * is being interpreted as a drop here.
-		 */
-		action = TC_ACT_SHOT;
+
+	/* A BPF program may overwrite the default action opcode.
+	 * Similarly as in cls_bpf, if filter_res == -1 we use the
+	 * default action specified from tc.
+	 *
+	 * In case a different well-known TC_ACT opcode has been
+	 * returned, it will overwrite the default one.
+	 *
+	 * For everything else that is unknown, TC_ACT_UNSPEC is
+	 * returned.
+	 */
+	switch (filter_res) {
+	case TC_ACT_PIPE:
+	case TC_ACT_RECLASSIFY:
+	case TC_ACT_OK:
+		action = filter_res;
+		break;
+	case TC_ACT_SHOT:
+		action = filter_res;
 		b->tcf_qstats.drops++;
+		break;
+	case TC_ACT_UNSPEC:
+		action = b->tcf_action;
+		break;
+	default:
+		action = TC_ACT_UNSPEC;
+		break;
 	}
 
 	spin_unlock(&b->tcf_lock);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 09487af..95fdf4e 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -78,8 +78,11 @@
 	struct tc_u_common	*tp_c;
 	int			refcnt;
 	unsigned int		divisor;
-	struct tc_u_knode __rcu	*ht[1];
 	struct rcu_head		rcu;
+	/* The 'ht' field MUST be the last field in structure to allow for
+	 * more entries allocated at end of structure.
+	 */
+	struct tc_u_knode __rcu	*ht[1];
 };
 
 struct tc_u_common {
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 6742200..fbb7ebf 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -228,6 +228,7 @@
 				 * to replay the request.
 				 */
 				module_put(em->ops->owner);
+				em->ops = NULL;
 				err = -EAGAIN;
 			}
 #endif
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index abbb7dc..59eeed4 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -217,6 +217,8 @@
 
 	for (i = 0; i < arg->npages && arg->pages[i]; i++)
 		__free_page(arg->pages[i]);
+
+	kfree(arg->pages);
 }
 
 static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 224a82f..1095be9 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -463,6 +463,8 @@
 		/* number of additional gid's */
 		if (get_int(&mesg, &N))
 			goto out;
+		if (N < 0 || N > NGROUPS_MAX)
+			goto out;
 		status = -ENOMEM;
 		rsci.cred.cr_group_info = groups_alloc(N);
 		if (rsci.cred.cr_group_info == NULL)
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 651f49a..9dd0ea8d 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -309,12 +309,15 @@
 	struct rpc_xprt *xprt = req->rq_xprt;
 	struct svc_serv *bc_serv = xprt->bc_serv;
 
+	spin_lock(&xprt->bc_pa_lock);
+	list_del(&req->rq_bc_pa_list);
+	spin_unlock(&xprt->bc_pa_lock);
+
 	req->rq_private_buf.len = copied;
 	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
 	dprintk("RPC:       add callback request to list\n");
 	spin_lock(&bc_serv->sv_cb_lock);
-	list_del(&req->rq_bc_pa_list);
 	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
 	wake_up(&bc_serv->sv_cb_waitq);
 	spin_unlock(&bc_serv->sv_cb_lock);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 33fb105..5199bb1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -921,7 +921,7 @@
 	poll_wait(filp, &queue_wait, wait);
 
 	/* alway allow write */
-	mask = POLL_OUT | POLLWRNORM;
+	mask = POLLOUT | POLLWRNORM;
 
 	if (!rp)
 		return mask;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 7e9acd9..91ffde8 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -738,8 +738,9 @@
 	struct rpc_xprt *xprt = rep->rr_xprt;
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 	__be32 *iptr;
-	int credits, rdmalen, status;
+	int rdmalen, status;
 	unsigned long cwnd;
+	u32 credits;
 
 	/* Check status. If bad, signal disconnect and return rep to pool */
 	if (rep->rr_len == ~0U) {
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index d1b7039..0a16fb6 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -285,7 +285,7 @@
  */
 struct rpcrdma_buffer {
 	spinlock_t	rb_lock;	/* protects indexes */
-	int		rb_max_requests;/* client max requests */
+	u32		rb_max_requests;/* client max requests */
 	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
 	struct list_head rb_all;
 	int		rb_send_index;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a4cf3643..14f09b3 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -464,10 +464,11 @@
 	/* Clean up all queues, except inputq: */
 	__skb_queue_purge(&l_ptr->outqueue);
 	__skb_queue_purge(&l_ptr->deferred_queue);
-	skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq);
-	if (!skb_queue_empty(&l_ptr->inputq))
+	if (!owner->inputq)
+		owner->inputq = &l_ptr->inputq;
+	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
+	if (!skb_queue_empty(owner->inputq))
 		owner->action_flags |= TIPC_MSG_EVT;
-	owner->inputq = &l_ptr->inputq;
 	l_ptr->next_out = NULL;
 	l_ptr->unacked_window = 0;
 	l_ptr->checkpoint = 1;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f73e975..b4d4467 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2364,8 +2364,6 @@
 		.hashfn = jhash,
 		.max_shift = 20, /* 1M */
 		.min_shift = 8,  /* 256 */
-		.grow_decision = rht_grow_above_75,
-		.shrink_decision = rht_shrink_below_30,
 	};
 
 	return rhashtable_init(&tn->sk_rht, &rht_params);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3af0ecf..2a0bbd2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1199,6 +1199,7 @@
 	regulatory_exit();
 out_fail_reg:
 	debugfs_remove(ieee80211_debugfs_dir);
+	nl80211_exit();
 out_fail_nl80211:
 	unregister_netdevice_notifier(&cfg80211_netdev_notifier);
 out_fail_notifier:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d78fd8b..b6f84f6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2654,10 +2654,6 @@
 			return err;
 	}
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!msg)
-		return -ENOMEM;
-
 	err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
 				  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
 				  &flags);
@@ -2666,6 +2662,10 @@
 	    !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
 		return -EOPNOTSUPP;
 
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
 	wdev = rdev_add_virtual_intf(rdev,
 				nla_data(info->attrs[NL80211_ATTR_IFNAME]),
 				type, err ? NULL : &flags, &params);
@@ -4400,6 +4400,16 @@
 	if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
 		return -EINVAL;
 
+	/* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT
+	 * as userspace might just pass through the capabilities from the IEs
+	 * directly, rather than enforcing this restriction and returning an
+	 * error in this case.
+	 */
+	if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) {
+		params.ht_capa = NULL;
+		params.vht_capa = NULL;
+	}
+
 	/* When you run into this, adjust the code below for the new flag */
 	BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
 
@@ -12528,9 +12538,7 @@
 			}
 
 			for (j = 0; j < match->n_channels; j++) {
-				if (nla_put_u32(msg,
-						NL80211_ATTR_WIPHY_FREQ,
-						match->channels[j])) {
+				if (nla_put_u32(msg, j, match->channels[j])) {
 					nla_nest_cancel(msg, nl_freqs);
 					nla_nest_cancel(msg, nl_match);
 					goto out;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index b586d0d..48dfc7b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -228,7 +228,7 @@
 
 /* We keep a static world regulatory domain in case of the absence of CRDA */
 static const struct ieee80211_regdomain world_regdom = {
-	.n_reg_rules = 6,
+	.n_reg_rules = 8,
 	.alpha2 =  "00",
 	.reg_rules = {
 		/* IEEE 802.11b/g, channels 1..11 */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cee479b..638af06 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2269,11 +2269,9 @@
 		 * have the xfrm_state's. We need to wait for KM to
 		 * negotiate new SA's or bail out with error.*/
 		if (net->xfrm.sysctl_larval_drop) {
-			dst_release(dst);
-			xfrm_pols_put(pols, drop_pols);
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
-
-			return ERR_PTR(-EREMOTE);
+			err = -EREMOTE;
+			goto error;
 		}
 
 		err = -EAGAIN;
@@ -2324,7 +2322,8 @@
 error:
 	dst_release(dst);
 dropdst:
-	dst_release(dst_orig);
+	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
+		dst_release(dst_orig);
 	xfrm_pols_put(pols, drop_pols);
 	return ERR_PTR(err);
 }
@@ -2338,7 +2337,8 @@
 				    struct sock *sk, int flags)
 {
 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
-					    flags | XFRM_LOOKUP_QUEUE);
+					    flags | XFRM_LOOKUP_QUEUE |
+					    XFRM_LOOKUP_KEEP_DST_REF);
 
 	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
diff --git a/scripts/gdb/linux/__init__.py b/scripts/gdb/linux/__init__.py
new file mode 100644
index 0000000..4680fb1
--- /dev/null
+++ b/scripts/gdb/linux/__init__.py
@@ -0,0 +1 @@
+# nothing to do for the initialization of this package
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index 97130f8..e4ea626 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -112,9 +112,9 @@
 	return aa_dfa_next(dfa, start, 0);
 }
 
-static inline bool mediated_filesystem(struct inode *inode)
+static inline bool mediated_filesystem(struct dentry *dentry)
 {
-	return !(inode->i_sb->s_flags & MS_NOUSER);
+	return !(dentry->d_sb->s_flags & MS_NOUSER);
 }
 
 #endif /* __APPARMOR_H */
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 65ca451..107db88 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -226,7 +226,7 @@
 	struct inode *inode = dentry->d_inode;
 	struct path_cond cond = { };
 
-	if (!inode || !dir->mnt || !mediated_filesystem(inode))
+	if (!inode || !dir->mnt || !mediated_filesystem(dentry))
 		return 0;
 
 	cond.uid = inode->i_uid;
@@ -250,7 +250,7 @@
 {
 	struct path_cond cond = { current_fsuid(), mode };
 
-	if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode))
+	if (!dir->mnt || !mediated_filesystem(dir->dentry))
 		return 0;
 
 	return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
@@ -285,7 +285,7 @@
 				  path->dentry->d_inode->i_mode
 	};
 
-	if (!path->mnt || !mediated_filesystem(path->dentry->d_inode))
+	if (!path->mnt || !mediated_filesystem(path->dentry))
 		return 0;
 
 	return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE,
@@ -305,7 +305,7 @@
 	struct aa_profile *profile;
 	int error = 0;
 
-	if (!mediated_filesystem(old_dentry->d_inode))
+	if (!mediated_filesystem(old_dentry))
 		return 0;
 
 	profile = aa_current_profile();
@@ -320,7 +320,7 @@
 	struct aa_profile *profile;
 	int error = 0;
 
-	if (!mediated_filesystem(old_dentry->d_inode))
+	if (!mediated_filesystem(old_dentry))
 		return 0;
 
 	profile = aa_current_profile();
@@ -346,7 +346,7 @@
 
 static int apparmor_path_chmod(struct path *path, umode_t mode)
 {
-	if (!mediated_filesystem(path->dentry->d_inode))
+	if (!mediated_filesystem(path->dentry))
 		return 0;
 
 	return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
@@ -358,7 +358,7 @@
 				   path->dentry->d_inode->i_mode
 	};
 
-	if (!mediated_filesystem(path->dentry->d_inode))
+	if (!mediated_filesystem(path->dentry))
 		return 0;
 
 	return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
@@ -366,7 +366,7 @@
 
 static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
 {
-	if (!mediated_filesystem(dentry->d_inode))
+	if (!mediated_filesystem(dentry))
 		return 0;
 
 	return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
@@ -379,7 +379,7 @@
 	struct aa_profile *profile;
 	int error = 0;
 
-	if (!mediated_filesystem(file_inode(file)))
+	if (!mediated_filesystem(file->f_path.dentry))
 		return 0;
 
 	/* If in exec, permission is handled by bprm hooks.
@@ -432,7 +432,7 @@
 	BUG_ON(!fprofile);
 
 	if (!file->f_path.mnt ||
-	    !mediated_filesystem(file_inode(file)))
+	    !mediated_filesystem(file->f_path.dentry))
 		return 0;
 
 	profile = __aa_current_profile();
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 35b394a..71e0e3a 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -114,7 +114,7 @@
 	 *    security_path hooks as a deleted dentry except without an inode
 	 *    allocated.
 	 */
-	if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+	if (d_unlinked(path->dentry) && d_is_positive(path->dentry) &&
 	    !(flags & PATH_MEDIATE_DELETED)) {
 			error = -ENOENT;
 			goto out;
diff --git a/security/inode.c b/security/inode.c
index 8e7ca62..131a3c4 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -203,7 +203,7 @@
 	mutex_lock(&parent->d_inode->i_mutex);
 	if (positive(dentry)) {
 		if (dentry->d_inode) {
-			if (S_ISDIR(dentry->d_inode->i_mode))
+			if (d_is_dir(dentry))
 				simple_rmdir(parent->d_inode, dentry);
 			else
 				simple_unlink(parent->d_inode, dentry);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 29c39e0..4d1a541 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1799,7 +1799,7 @@
 
 	old_dsec = old_dir->i_security;
 	old_isec = old_dentry->d_inode->i_security;
-	old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
+	old_is_dir = d_is_dir(old_dentry);
 	new_dsec = new_dir->i_security;
 
 	ad.type = LSM_AUDIT_DATA_DENTRY;
@@ -1822,14 +1822,14 @@
 
 	ad.u.dentry = new_dentry;
 	av = DIR__ADD_NAME | DIR__SEARCH;
-	if (new_dentry->d_inode)
+	if (d_is_positive(new_dentry))
 		av |= DIR__REMOVE_NAME;
 	rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad);
 	if (rc)
 		return rc;
-	if (new_dentry->d_inode) {
+	if (d_is_positive(new_dentry)) {
 		new_isec = new_dentry->d_inode->i_security;
-		new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode);
+		new_is_dir = d_is_dir(new_dentry);
 		rc = avc_has_perm(sid, new_isec->sid,
 				  new_isec->sclass,
 				  (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index ed94f6f..c934311 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -855,7 +855,7 @@
 	rc = smk_curacc(isp, MAY_WRITE, &ad);
 	rc = smk_bu_inode(old_dentry->d_inode, MAY_WRITE, rc);
 
-	if (rc == 0 && new_dentry->d_inode != NULL) {
+	if (rc == 0 && d_is_positive(new_dentry)) {
 		isp = smk_of_inode(new_dentry->d_inode);
 		smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
 		rc = smk_curacc(isp, MAY_WRITE, &ad);
@@ -961,7 +961,7 @@
 	rc = smk_curacc(isp, MAY_READWRITE, &ad);
 	rc = smk_bu_inode(old_dentry->d_inode, MAY_READWRITE, rc);
 
-	if (rc == 0 && new_dentry->d_inode != NULL) {
+	if (rc == 0 && d_is_positive(new_dentry)) {
 		isp = smk_of_inode(new_dentry->d_inode);
 		smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
 		rc = smk_curacc(isp, MAY_READWRITE, &ad);
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 4003907..c151a18 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -905,11 +905,9 @@
 	    !tomoyo_get_realpath(&buf2, path2))
 		goto out;
 	switch (operation) {
-		struct dentry *dentry;
 	case TOMOYO_TYPE_RENAME:
 	case TOMOYO_TYPE_LINK:
-		dentry = path1->dentry;
-		if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode))
+		if (!d_is_dir(path1->dentry))
 			break;
 		/* fall through */
 	case TOMOYO_TYPE_PIVOT_ROOT:
diff --git a/sound/core/control.c b/sound/core/control.c
index 35324a8..eeb691d 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1170,6 +1170,10 @@
 
 	if (info->count < 1)
 		return -EINVAL;
+	if (!*info->id.name)
+		return -EINVAL;
+	if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name))
+		return -EINVAL;
 	access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
 		(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
 				 SNDRV_CTL_ELEM_ACCESS_INACTIVE|
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index b03a638..279e24f 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1552,6 +1552,8 @@
 			if (! snd_pcm_playback_empty(substream)) {
 				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
 				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
+			} else {
+				runtime->status->state = SNDRV_PCM_STATE_SETUP;
 			}
 			break;
 		case SNDRV_PCM_STATE_RUNNING:
diff --git a/sound/core/seq/seq_midi_emul.c b/sound/core/seq/seq_midi_emul.c
index 9b6470c..7ba9373 100644
--- a/sound/core/seq/seq_midi_emul.c
+++ b/sound/core/seq/seq_midi_emul.c
@@ -269,6 +269,9 @@
 {
 	int  i;
 
+	if (control >= ARRAY_SIZE(chan->control))
+		return;
+
 	/* Switches */
 	if ((control >=64 && control <=69) || (control >= 80 && control <= 83)) {
 		/* These are all switches; either off or on so set to 0 or 127 */
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
index f62780e..7821b07 100644
--- a/sound/drivers/opl3/opl3_midi.c
+++ b/sound/drivers/opl3/opl3_midi.c
@@ -105,6 +105,8 @@
 		int pitchbend = chan->midi_pitchbend;
 		int segment;
 
+		if (pitchbend < -0x2000)
+			pitchbend = -0x2000;
 		if (pitchbend > 0x1FFF)
 			pitchbend = 0x1FFF;
 
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 0d58018..5cc356d 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -33,7 +33,7 @@
  */
 #define MAX_MIDI_RX_BLOCKS	8
 
-#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 µs */
+#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */
 
 /* isochronous header parameters */
 #define ISO_DATA_LENGTH_SHIFT	16
@@ -78,7 +78,7 @@
 int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
 		      enum amdtp_stream_direction dir, enum cip_flags flags)
 {
-	s->unit = fw_unit_get(unit);
+	s->unit = unit;
 	s->direction = dir;
 	s->flags = flags;
 	s->context = ERR_PTR(-1);
@@ -102,7 +102,6 @@
 {
 	WARN_ON(amdtp_stream_running(s));
 	mutex_destroy(&s->mutex);
-	fw_unit_put(s->unit);
 }
 EXPORT_SYMBOL(amdtp_stream_destroy);
 
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index fc19c99..611b7da 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -116,11 +116,22 @@
 	return err;
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, when returning from .remove(), this module still
+ * keeps references to the unit.
+ */
 static void
 bebob_card_free(struct snd_card *card)
 {
 	struct snd_bebob *bebob = card->private_data;
 
+	snd_bebob_stream_destroy_duplex(bebob);
+	fw_unit_put(bebob->unit);
+
+	kfree(bebob->maudio_special_quirk);
+
 	if (bebob->card_index >= 0) {
 		mutex_lock(&devices_mutex);
 		clear_bit(bebob->card_index, devices_used);
@@ -205,7 +216,7 @@
 	card->private_free = bebob_card_free;
 
 	bebob->card = card;
-	bebob->unit = unit;
+	bebob->unit = fw_unit_get(unit);
 	bebob->spec = spec;
 	mutex_init(&bebob->mutex);
 	spin_lock_init(&bebob->lock);
@@ -306,10 +317,11 @@
 	if (bebob == NULL)
 		return;
 
-	kfree(bebob->maudio_special_quirk);
+	/* Wake up bus-reset waiters. */
+	if (!completion_done(&bebob->bus_reset))
+		complete_all(&bebob->bus_reset);
 
-	snd_bebob_stream_destroy_duplex(bebob);
-	snd_card_disconnect(bebob->card);
+	/* No need to wait for releasing card object in this context. */
 	snd_card_free_when_closed(bebob->card);
 }
 
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index 0ebcabf..98e4fc8 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -410,8 +410,6 @@
 static void
 destroy_both_connections(struct snd_bebob *bebob)
 {
-	break_both_connections(bebob);
-
 	cmp_connection_destroy(&bebob->in_conn);
 	cmp_connection_destroy(&bebob->out_conn);
 }
@@ -712,22 +710,16 @@
 	mutex_unlock(&bebob->mutex);
 }
 
+/*
+ * This function should be called before starting streams or after stopping
+ * streams.
+ */
 void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob)
 {
-	mutex_lock(&bebob->mutex);
-
-	amdtp_stream_pcm_abort(&bebob->rx_stream);
-	amdtp_stream_pcm_abort(&bebob->tx_stream);
-
-	amdtp_stream_stop(&bebob->rx_stream);
-	amdtp_stream_stop(&bebob->tx_stream);
-
 	amdtp_stream_destroy(&bebob->rx_stream);
 	amdtp_stream_destroy(&bebob->tx_stream);
 
 	destroy_both_connections(bebob);
-
-	mutex_unlock(&bebob->mutex);
 }
 
 /*
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index fa9cf76..07dbd01 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -311,14 +311,21 @@
 	return err;
 }
 
+/*
+ * This function should be called before starting streams or after stopping
+ * streams.
+ */
 static void destroy_stream(struct snd_dice *dice, struct amdtp_stream *stream)
 {
-	amdtp_stream_destroy(stream);
+	struct fw_iso_resources *resources;
 
 	if (stream == &dice->tx_stream)
-		fw_iso_resources_destroy(&dice->tx_resources);
+		resources = &dice->tx_resources;
 	else
-		fw_iso_resources_destroy(&dice->rx_resources);
+		resources = &dice->rx_resources;
+
+	amdtp_stream_destroy(stream);
+	fw_iso_resources_destroy(resources);
 }
 
 int snd_dice_stream_init_duplex(struct snd_dice *dice)
@@ -332,6 +339,8 @@
 		goto end;
 
 	err = init_stream(dice, &dice->rx_stream);
+	if (err < 0)
+		destroy_stream(dice, &dice->tx_stream);
 end:
 	return err;
 }
@@ -340,10 +349,7 @@
 {
 	snd_dice_transaction_clear_enable(dice);
 
-	stop_stream(dice, &dice->tx_stream);
 	destroy_stream(dice, &dice->tx_stream);
-
-	stop_stream(dice, &dice->rx_stream);
 	destroy_stream(dice, &dice->rx_stream);
 
 	dice->substreams_counter = 0;
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 90d8f40..70a111d7f 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -226,11 +226,20 @@
 	strcpy(card->mixername, "DICE");
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, when returning from .remove(), this module still
+ * keeps references to the unit.
+ */
 static void dice_card_free(struct snd_card *card)
 {
 	struct snd_dice *dice = card->private_data;
 
+	snd_dice_stream_destroy_duplex(dice);
 	snd_dice_transaction_destroy(dice);
+	fw_unit_put(dice->unit);
+
 	mutex_destroy(&dice->mutex);
 }
 
@@ -251,7 +260,7 @@
 
 	dice = card->private_data;
 	dice->card = card;
-	dice->unit = unit;
+	dice->unit = fw_unit_get(unit);
 	card->private_free = dice_card_free;
 
 	spin_lock_init(&dice->lock);
@@ -305,10 +314,7 @@
 {
 	struct snd_dice *dice = dev_get_drvdata(&unit->device);
 
-	snd_card_disconnect(dice->card);
-
-	snd_dice_stream_destroy_duplex(dice);
-
+	/* No need to wait for releasing card object in this context. */
 	snd_card_free_when_closed(dice->card);
 }
 
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 3e2ed8e..2682e7e 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -173,11 +173,23 @@
 	return err;
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, when returning from .remove(), this module still
+ * keeps references to the unit.
+ */
 static void
 efw_card_free(struct snd_card *card)
 {
 	struct snd_efw *efw = card->private_data;
 
+	snd_efw_stream_destroy_duplex(efw);
+	snd_efw_transaction_remove_instance(efw);
+	fw_unit_put(efw->unit);
+
+	kfree(efw->resp_buf);
+
 	if (efw->card_index >= 0) {
 		mutex_lock(&devices_mutex);
 		clear_bit(efw->card_index, devices_used);
@@ -185,7 +197,6 @@
 	}
 
 	mutex_destroy(&efw->mutex);
-	kfree(efw->resp_buf);
 }
 
 static int
@@ -218,7 +229,7 @@
 	card->private_free = efw_card_free;
 
 	efw->card = card;
-	efw->unit = unit;
+	efw->unit = fw_unit_get(unit);
 	mutex_init(&efw->mutex);
 	spin_lock_init(&efw->lock);
 	init_waitqueue_head(&efw->hwdep_wait);
@@ -289,10 +300,7 @@
 {
 	struct snd_efw *efw = dev_get_drvdata(&unit->device);
 
-	snd_efw_stream_destroy_duplex(efw);
-	snd_efw_transaction_remove_instance(efw);
-
-	snd_card_disconnect(efw->card);
+	/* No need to wait for releasing card object in this context. */
 	snd_card_free_when_closed(efw->card);
 }
 
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index 4f440e1..c55db1b 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -100,17 +100,22 @@
 	return err;
 }
 
+/*
+ * This function should be called before starting the stream or after stopping
+ * the streams.
+ */
 static void
 destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream)
 {
-	stop_stream(efw, stream);
-
-	amdtp_stream_destroy(stream);
+	struct cmp_connection *conn;
 
 	if (stream == &efw->tx_stream)
-		cmp_connection_destroy(&efw->out_conn);
+		conn = &efw->out_conn;
 	else
-		cmp_connection_destroy(&efw->in_conn);
+		conn = &efw->in_conn;
+
+	amdtp_stream_destroy(stream);
+	cmp_connection_destroy(conn);
 }
 
 static int
@@ -319,12 +324,8 @@
 
 void snd_efw_stream_destroy_duplex(struct snd_efw *efw)
 {
-	mutex_lock(&efw->mutex);
-
 	destroy_stream(efw, &efw->rx_stream);
 	destroy_stream(efw, &efw->tx_stream);
-
-	mutex_unlock(&efw->mutex);
 }
 
 void snd_efw_stream_lock_changed(struct snd_efw *efw)
diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
index 5f17b77..f0e4d50 100644
--- a/sound/firewire/iso-resources.c
+++ b/sound/firewire/iso-resources.c
@@ -26,7 +26,7 @@
 int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit)
 {
 	r->channels_mask = ~0uLL;
-	r->unit = fw_unit_get(unit);
+	r->unit = unit;
 	mutex_init(&r->mutex);
 	r->allocated = false;
 
@@ -42,7 +42,6 @@
 {
 	WARN_ON(r->allocated);
 	mutex_destroy(&r->mutex);
-	fw_unit_put(r->unit);
 }
 EXPORT_SYMBOL(fw_iso_resources_destroy);
 
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index bda845a..e6757cd 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -171,9 +171,10 @@
 	}
 
 	/* Wait first packet */
-	err = amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT);
-	if (err < 0)
+	if (!amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT)) {
 		stop_stream(oxfw, stream);
+		err = -ETIMEDOUT;
+	}
 end:
 	return err;
 }
@@ -337,6 +338,10 @@
 	stop_stream(oxfw, stream);
 }
 
+/*
+ * This function should be called before starting the stream or after stopping
+ * the streams.
+ */
 void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
 				     struct amdtp_stream *stream)
 {
@@ -347,8 +352,6 @@
 	else
 		conn = &oxfw->in_conn;
 
-	stop_stream(oxfw, stream);
-
 	amdtp_stream_destroy(stream);
 	cmp_connection_destroy(conn);
 }
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 60e5cad..8c6ce01 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -104,11 +104,23 @@
 	return err;
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, when returning from .remove(), this module still
+ * keeps references to the unit.
+ */
 static void oxfw_card_free(struct snd_card *card)
 {
 	struct snd_oxfw *oxfw = card->private_data;
 	unsigned int i;
 
+	snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
+	if (oxfw->has_output)
+		snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+
+	fw_unit_put(oxfw->unit);
+
 	for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
 		kfree(oxfw->tx_stream_formats[i]);
 		kfree(oxfw->rx_stream_formats[i]);
@@ -136,7 +148,7 @@
 	oxfw = card->private_data;
 	oxfw->card = card;
 	mutex_init(&oxfw->mutex);
-	oxfw->unit = unit;
+	oxfw->unit = fw_unit_get(unit);
 	oxfw->device_info = (const struct device_info *)id->driver_data;
 	spin_lock_init(&oxfw->lock);
 	init_waitqueue_head(&oxfw->hwdep_wait);
@@ -212,12 +224,7 @@
 {
 	struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
 
-	snd_card_disconnect(oxfw->card);
-
-	snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
-	if (oxfw->has_output)
-		snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
-
+	/* No need to wait for releasing card object in this context. */
 	snd_card_free_when_closed(oxfw->card);
 }
 
diff --git a/sound/isa/msnd/msnd_pinnacle_mixer.c b/sound/isa/msnd/msnd_pinnacle_mixer.c
index 17e49a0..b408540 100644
--- a/sound/isa/msnd/msnd_pinnacle_mixer.c
+++ b/sound/isa/msnd/msnd_pinnacle_mixer.c
@@ -306,11 +306,12 @@
 	spin_lock_init(&chip->mixer_lock);
 	strcpy(card->mixername, "MSND Pinnacle Mixer");
 
-	for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++)
+	for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++) {
 		err = snd_ctl_add(card,
 				  snd_ctl_new1(snd_msnd_controls + idx, chip));
 		if (err < 0)
 			return err;
+	}
 
 	return 0;
 }
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index dfcb5e9..17c2637 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -961,7 +961,6 @@
 		dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
 	return err;
 }
-EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
 
 static void azx_init_cmd_io(struct azx *chip)
 {
@@ -1026,7 +1025,6 @@
 	azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
 	spin_unlock_irq(&chip->reg_lock);
 }
-EXPORT_SYMBOL_GPL(azx_init_cmd_io);
 
 static void azx_free_cmd_io(struct azx *chip)
 {
@@ -1036,7 +1034,6 @@
 	azx_writeb(chip, CORBCTL, 0);
 	spin_unlock_irq(&chip->reg_lock);
 }
-EXPORT_SYMBOL_GPL(azx_free_cmd_io);
 
 static unsigned int azx_command_addr(u32 cmd)
 {
@@ -1167,7 +1164,7 @@
 		}
 	}
 
-	if (!bus->no_response_fallback)
+	if (bus->no_response_fallback)
 		return -1;
 
 	if (!chip->polling_mode && chip->poll_count < 2) {
@@ -1316,7 +1313,6 @@
 	else
 		return azx_corb_send_cmd(bus, val);
 }
-EXPORT_SYMBOL_GPL(azx_send_cmd);
 
 /* get a response */
 static unsigned int azx_get_response(struct hda_bus *bus,
@@ -1330,7 +1326,6 @@
 	else
 		return azx_rirb_get_response(bus, addr);
 }
-EXPORT_SYMBOL_GPL(azx_get_response);
 
 #ifdef CONFIG_SND_HDA_DSP_LOADER
 /*
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index b680b4e..8ec5289 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -687,12 +687,45 @@
 	return val;
 }
 
+/* is this a stereo widget or a stereo-to-mono mix? */
+static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir)
+{
+	unsigned int wcaps = get_wcaps(codec, nid);
+	hda_nid_t conn;
+
+	if (wcaps & AC_WCAP_STEREO)
+		return true;
+	if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
+		return false;
+	if (snd_hda_get_num_conns(codec, nid) != 1)
+		return false;
+	if (snd_hda_get_connections(codec, nid, &conn, 1) < 0)
+		return false;
+	return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO);
+}
+
 /* initialize the amp value (only at the first time) */
 static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
 {
 	unsigned int caps = query_amp_caps(codec, nid, dir);
 	int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
-	snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
+
+	if (is_stereo_amps(codec, nid, dir))
+		snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
+	else
+		snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
+}
+
+/* update the amp, doing it in stereo or mono depending on NID */
+static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
+		      unsigned int mask, unsigned int val)
+{
+	if (is_stereo_amps(codec, nid, dir))
+		return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
+						mask, val);
+	else
+		return snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
+						mask, val);
 }
 
 /* calculate amp value mask we can modify;
@@ -732,7 +765,7 @@
 		return;
 
 	val &= mask;
-	snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val);
+	update_amp(codec, nid, dir, idx, mask, val);
 }
 
 static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
@@ -4424,13 +4457,11 @@
 	has_amp = nid_has_mute(codec, mix, HDA_INPUT);
 	for (i = 0; i < nums; i++) {
 		if (has_amp)
-			snd_hda_codec_amp_stereo(codec, mix,
-						 HDA_INPUT, i,
-						 0xff, HDA_AMP_MUTE);
+			update_amp(codec, mix, HDA_INPUT, i,
+				   0xff, HDA_AMP_MUTE);
 		else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
-			snd_hda_codec_amp_stereo(codec, conn[i],
-						 HDA_OUTPUT, 0,
-						 0xff, HDA_AMP_MUTE);
+			update_amp(codec, conn[i], HDA_OUTPUT, 0,
+				   0xff, HDA_AMP_MUTE);
 	}
 }
 
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 36d2f20..4ca3d5d 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1966,7 +1966,7 @@
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
 	/* Panther Point */
 	{ PCI_DEVICE(0x8086, 0x1e20),
-	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
 	/* Lynx Point */
 	{ PCI_DEVICE(0x8086, 0x8c20),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index ce5a6da..05e19f7 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -134,13 +134,38 @@
 		    (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT);
 }
 
+/* is this a stereo widget or a stereo-to-mono mix? */
+static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid,
+			   int dir, unsigned int wcaps, int indices)
+{
+	hda_nid_t conn;
+
+	if (wcaps & AC_WCAP_STEREO)
+		return true;
+	/* check for a stereo-to-mono mix; it must be:
+	 * only a single connection, only for input, and only a mixer widget
+	 */
+	if (indices != 1 || dir != HDA_INPUT ||
+	    get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
+		return false;
+
+	if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0)
+		return false;
+	/* the connection source is a stereo? */
+	wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP);
+	return !!(wcaps & AC_WCAP_STEREO);
+}
+
 static void print_amp_vals(struct snd_info_buffer *buffer,
 			   struct hda_codec *codec, hda_nid_t nid,
-			   int dir, int stereo, int indices)
+			   int dir, unsigned int wcaps, int indices)
 {
 	unsigned int val;
+	bool stereo;
 	int i;
 
+	stereo = is_stereo_amps(codec, nid, dir, wcaps, indices);
+
 	dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
 	for (i = 0; i < indices; i++) {
 		snd_iprintf(buffer, " [");
@@ -757,12 +782,10 @@
 			    (codec->single_adc_amp &&
 			     wid_type == AC_WID_AUD_IN))
 				print_amp_vals(buffer, codec, nid, HDA_INPUT,
-					       wid_caps & AC_WCAP_STEREO,
-					       1);
+					       wid_caps, 1);
 			else
 				print_amp_vals(buffer, codec, nid, HDA_INPUT,
-					       wid_caps & AC_WCAP_STEREO,
-					       conn_len);
+					       wid_caps, conn_len);
 		}
 		if (wid_caps & AC_WCAP_OUT_AMP) {
 			snd_iprintf(buffer, "  Amp-Out caps: ");
@@ -771,11 +794,10 @@
 			if (wid_type == AC_WID_PIN &&
 			    codec->pin_amp_workaround)
 				print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
-					       wid_caps & AC_WCAP_STEREO,
-					       conn_len);
+					       wid_caps, conn_len);
 			else
 				print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
-					       wid_caps & AC_WCAP_STEREO, 1);
+					       wid_caps, 1);
 		}
 
 		switch (wid_type) {
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 227990b..375e94f 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -329,8 +329,8 @@
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hda->regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(chip->remap_addr))
-		return PTR_ERR(chip->remap_addr);
+	if (IS_ERR(hda->regs))
+		return PTR_ERR(hda->regs);
 
 	chip->remap_addr = hda->regs + HDA_BAR0;
 	chip->addr = res->start + HDA_BAR0;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 1589c9b..dd2b3d9 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -393,6 +393,7 @@
 	SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
 	SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
 	SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
+	SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81),
 	SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
 	SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
 	{} /* terminator */
@@ -584,6 +585,7 @@
 		return -ENOMEM;
 
 	spec->gen.automute_hook = cs_automute;
+	codec->single_adc_amp = 1;
 
 	snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl,
 			   cs420x_fixups);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index fd3ed18..da67ea8 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -223,6 +223,7 @@
 	CXT_PINCFG_LENOVO_TP410,
 	CXT_PINCFG_LEMOTE_A1004,
 	CXT_PINCFG_LEMOTE_A1205,
+	CXT_PINCFG_COMPAQ_CQ60,
 	CXT_FIXUP_STEREO_DMIC,
 	CXT_FIXUP_INC_MIC_BOOST,
 	CXT_FIXUP_HEADPHONE_MIC_PIN,
@@ -660,6 +661,15 @@
 		.type = HDA_FIXUP_PINS,
 		.v.pins = cxt_pincfg_lemote,
 	},
+	[CXT_PINCFG_COMPAQ_CQ60] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			/* 0x17 was falsely set up as a mic, it should be 0x1d */
+			{ 0x17, 0x400001f0 },
+			{ 0x1d, 0x97a70120 },
+			{ }
+		}
+	},
 	[CXT_FIXUP_STEREO_DMIC] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = cxt_fixup_stereo_dmic,
@@ -769,6 +779,7 @@
 };
 
 static const struct snd_pci_quirk cxt5051_fixups[] = {
+	SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
 	{}
 };
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ddb9308..526398a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4937,6 +4937,7 @@
 	SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
 	SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
 	/* ALC282 */
+	SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
@@ -5208,6 +5209,13 @@
 		{0x17, 0x40000000},
 		{0x1d, 0x40700001},
 		{0x21, 0x02211040}),
+	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC255_STANDARD_PINS,
+		{0x12, 0x90a60170},
+		{0x14, 0x90170140},
+		{0x17, 0x40000000},
+		{0x1d, 0x40700001},
+		{0x21, 0x02211050}),
 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
 		{0x12, 0x90a60130},
 		{0x13, 0x40000000},
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6d36c5b..87eff31 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -79,6 +79,7 @@
 	STAC_ALIENWARE_M17X,
 	STAC_92HD89XX_HP_FRONT_JACK,
 	STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
+	STAC_92HD73XX_ASUS_MOBO,
 	STAC_92HD73XX_MODELS
 };
 
@@ -1911,7 +1912,18 @@
 	[STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
-	}
+	},
+	[STAC_92HD73XX_ASUS_MOBO] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			/* enable 5.1 and SPDIF out */
+			{ 0x0c, 0x01014411 },
+			{ 0x0d, 0x01014410 },
+			{ 0x0e, 0x01014412 },
+			{ 0x22, 0x014b1180 },
+			{ }
+		}
+	},
 };
 
 static const struct hda_model_fixup stac92hd73xx_models[] = {
@@ -1923,6 +1935,7 @@
 	{ .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
 	{ .id = STAC_DELL_EQ, .name = "dell-eq" },
 	{ .id = STAC_ALIENWARE_M17X, .name = "alienware" },
+	{ .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
 	{}
 };
 
@@ -1975,6 +1988,8 @@
 				"HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
 				"unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10",
+		      STAC_92HD73XX_ASUS_MOBO),
 	{} /* terminator */
 };
 
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 2c363fdc..ca67f89 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6082,6 +6082,9 @@
 		snd_pcm_hw_constraint_minmax(runtime,
 					     SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
 					     64, 8192);
+		snd_pcm_hw_constraint_minmax(runtime,
+					     SNDRV_PCM_HW_PARAM_PERIODS,
+					     2, 2);
 		break;
 	}
 
@@ -6156,6 +6159,9 @@
 		snd_pcm_hw_constraint_minmax(runtime,
 					     SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
 					     64, 8192);
+		snd_pcm_hw_constraint_minmax(runtime,
+					     SNDRV_PCM_HW_PARAM_PERIODS,
+					     2, 2);
 		break;
 	}
 
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index f5ad214..8de8361 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -46,8 +46,6 @@
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
 
-#include <asm/mach-types.h>
-
 #include "../codecs/wm8731.h"
 #include "atmel-pcm.h"
 #include "atmel_ssc_dai.h"
@@ -171,9 +169,7 @@
 	int ret;
 
 	if (!np) {
-		if (!(machine_is_at91sam9g20ek() ||
-			machine_is_at91sam9g20ek_2mmc()))
-			return -ENODEV;
+		return -ENODEV;
 	}
 
 	ret = atmel_ssc_set_audio(0);
@@ -210,39 +206,37 @@
 	card->dev = &pdev->dev;
 
 	/* Parse device node info */
-	if (np) {
-		ret = snd_soc_of_parse_card_name(card, "atmel,model");
-		if (ret)
-			goto err;
+	ret = snd_soc_of_parse_card_name(card, "atmel,model");
+	if (ret)
+		goto err;
 
-		ret = snd_soc_of_parse_audio_routing(card,
-			"atmel,audio-routing");
-		if (ret)
-			goto err;
+	ret = snd_soc_of_parse_audio_routing(card,
+		"atmel,audio-routing");
+	if (ret)
+		goto err;
 
-		/* Parse codec info */
-		at91sam9g20ek_dai.codec_name = NULL;
-		codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
-		if (!codec_np) {
-			dev_err(&pdev->dev, "codec info missing\n");
-			return -EINVAL;
-		}
-		at91sam9g20ek_dai.codec_of_node = codec_np;
-
-		/* Parse dai and platform info */
-		at91sam9g20ek_dai.cpu_dai_name = NULL;
-		at91sam9g20ek_dai.platform_name = NULL;
-		cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
-		if (!cpu_np) {
-			dev_err(&pdev->dev, "dai and pcm info missing\n");
-			return -EINVAL;
-		}
-		at91sam9g20ek_dai.cpu_of_node = cpu_np;
-		at91sam9g20ek_dai.platform_of_node = cpu_np;
-
-		of_node_put(codec_np);
-		of_node_put(cpu_np);
+	/* Parse codec info */
+	at91sam9g20ek_dai.codec_name = NULL;
+	codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
+	if (!codec_np) {
+		dev_err(&pdev->dev, "codec info missing\n");
+		return -EINVAL;
 	}
+	at91sam9g20ek_dai.codec_of_node = codec_np;
+
+	/* Parse dai and platform info */
+	at91sam9g20ek_dai.cpu_dai_name = NULL;
+	at91sam9g20ek_dai.platform_name = NULL;
+	cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
+	if (!cpu_np) {
+		dev_err(&pdev->dev, "dai and pcm info missing\n");
+		return -EINVAL;
+	}
+	at91sam9g20ek_dai.cpu_of_node = cpu_np;
+	at91sam9g20ek_dai.platform_of_node = cpu_np;
+
+	of_node_put(codec_np);
+	of_node_put(cpu_np);
 
 	ret = snd_soc_register_card(card);
 	if (ret) {
diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig
index 7b7fbcd..c7cd60f 100644
--- a/sound/soc/cirrus/Kconfig
+++ b/sound/soc/cirrus/Kconfig
@@ -16,7 +16,7 @@
 
 config SND_EP93XX_SOC_SNAPPERCL15
         tristate "SoC Audio support for Bluewater Systems Snapper CL15 module"
-        depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15
+        depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 && I2C
         select SND_EP93XX_SOC_I2S
         select SND_SOC_TLV320AIC23_I2C
         help
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 064e6c1..ea9f0e3 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -69,7 +69,7 @@
 	select SND_SOC_MAX98088 if I2C
 	select SND_SOC_MAX98090 if I2C
 	select SND_SOC_MAX98095 if I2C
-	select SND_SOC_MAX98357A
+	select SND_SOC_MAX98357A if GPIOLIB
 	select SND_SOC_MAX9850 if I2C
 	select SND_SOC_MAX9768 if I2C
 	select SND_SOC_MAX9877 if I2C
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index b67480f..4373ada 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -317,7 +317,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
-	unsigned int deemph = ucontrol->value.enumerated.item[0];
+	unsigned int deemph = ucontrol->value.integer.value[0];
 
 	if (deemph > 1)
 		return -EINVAL;
@@ -333,7 +333,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = adav80x->deemph;
+	ucontrol->value.integer.value[0] = adav80x->deemph;
 	return 0;
 };
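
Note: the deemphasis switch changed above is a plain boolean mixer control, so its get/put callbacks must use ucontrol->value.integer.value[]; value.enumerated.item[] is only valid for enumerated controls, which is why the same one-line change repeats across the codec drivers below. A minimal sketch of the corrected callback pair against the ASoC API these drivers already use (struct my_codec_priv and its deemph field are illustrative, not part of any patch here):

	#include <sound/soc.h>

	struct my_codec_priv {			/* illustrative driver-private state */
		unsigned int deemph;
	};

	static int my_codec_get_deemph(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
	{
		struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
		struct my_codec_priv *priv = snd_soc_codec_get_drvdata(codec);

		/* boolean/integer controls are reported via value.integer.value[] */
		ucontrol->value.integer.value[0] = priv->deemph;
		return 0;
	}

	static int my_codec_put_deemph(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
	{
		struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
		struct my_codec_priv *priv = snd_soc_codec_get_drvdata(codec);
		unsigned int deemph = ucontrol->value.integer.value[0];

		if (deemph > 1)
			return -EINVAL;

		priv->deemph = deemph;
		return 0;
	}
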
 
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index 70861c7..81b54a2 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -76,7 +76,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
-	int deemph = ucontrol->value.enumerated.item[0];
+	int deemph = ucontrol->value.integer.value[0];
 
 	if (deemph > 1)
 		return -EINVAL;
@@ -92,7 +92,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = ak4641->deemph;
+	ucontrol->value.integer.value[0] = ak4641->deemph;
 	return 0;
 };
 
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 632e89f..2a58b1d 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -343,25 +343,25 @@
 };
 
 static const struct snd_soc_dapm_route ak4671_intercon[] = {
-	{"DAC Left", "NULL", "PMPLL"},
-	{"DAC Right", "NULL", "PMPLL"},
-	{"ADC Left", "NULL", "PMPLL"},
-	{"ADC Right", "NULL", "PMPLL"},
+	{"DAC Left", NULL, "PMPLL"},
+	{"DAC Right", NULL, "PMPLL"},
+	{"ADC Left", NULL, "PMPLL"},
+	{"ADC Right", NULL, "PMPLL"},
 
 	/* Outputs */
-	{"LOUT1", "NULL", "LOUT1 Mixer"},
-	{"ROUT1", "NULL", "ROUT1 Mixer"},
-	{"LOUT2", "NULL", "LOUT2 Mix Amp"},
-	{"ROUT2", "NULL", "ROUT2 Mix Amp"},
-	{"LOUT3", "NULL", "LOUT3 Mixer"},
-	{"ROUT3", "NULL", "ROUT3 Mixer"},
+	{"LOUT1", NULL, "LOUT1 Mixer"},
+	{"ROUT1", NULL, "ROUT1 Mixer"},
+	{"LOUT2", NULL, "LOUT2 Mix Amp"},
+	{"ROUT2", NULL, "ROUT2 Mix Amp"},
+	{"LOUT3", NULL, "LOUT3 Mixer"},
+	{"ROUT3", NULL, "ROUT3 Mixer"},
 
 	{"LOUT1 Mixer", "DACL", "DAC Left"},
 	{"ROUT1 Mixer", "DACR", "DAC Right"},
 	{"LOUT2 Mixer", "DACHL", "DAC Left"},
 	{"ROUT2 Mixer", "DACHR", "DAC Right"},
-	{"LOUT2 Mix Amp", "NULL", "LOUT2 Mixer"},
-	{"ROUT2 Mix Amp", "NULL", "ROUT2 Mixer"},
+	{"LOUT2 Mix Amp", NULL, "LOUT2 Mixer"},
+	{"ROUT2 Mix Amp", NULL, "ROUT2 Mixer"},
 	{"LOUT3 Mixer", "DACSL", "DAC Left"},
 	{"ROUT3 Mixer", "DACSR", "DAC Right"},
 
@@ -381,18 +381,18 @@
 	{"LIN2", NULL, "Mic Bias"},
 	{"RIN2", NULL, "Mic Bias"},
 
-	{"ADC Left", "NULL", "LIN MUX"},
-	{"ADC Right", "NULL", "RIN MUX"},
+	{"ADC Left", NULL, "LIN MUX"},
+	{"ADC Right", NULL, "RIN MUX"},
 
 	/* Analog Loops */
-	{"LIN1 Mixing Circuit", "NULL", "LIN1"},
-	{"RIN1 Mixing Circuit", "NULL", "RIN1"},
-	{"LIN2 Mixing Circuit", "NULL", "LIN2"},
-	{"RIN2 Mixing Circuit", "NULL", "RIN2"},
-	{"LIN3 Mixing Circuit", "NULL", "LIN3"},
-	{"RIN3 Mixing Circuit", "NULL", "RIN3"},
-	{"LIN4 Mixing Circuit", "NULL", "LIN4"},
-	{"RIN4 Mixing Circuit", "NULL", "RIN4"},
+	{"LIN1 Mixing Circuit", NULL, "LIN1"},
+	{"RIN1 Mixing Circuit", NULL, "RIN1"},
+	{"LIN2 Mixing Circuit", NULL, "LIN2"},
+	{"RIN2 Mixing Circuit", NULL, "RIN2"},
+	{"LIN3 Mixing Circuit", NULL, "LIN3"},
+	{"RIN3 Mixing Circuit", NULL, "RIN3"},
+	{"LIN4 Mixing Circuit", NULL, "LIN4"},
+	{"RIN4 Mixing Circuit", NULL, "RIN4"},
 
 	{"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"},
 	{"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"},
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 79a4efc..7d3a6ac 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -286,7 +286,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = cs4271->deemph;
+	ucontrol->value.integer.value[0] = cs4271->deemph;
 	return 0;
 }
 
@@ -296,7 +296,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
 
-	cs4271->deemph = ucontrol->value.enumerated.item[0];
+	cs4271->deemph = ucontrol->value.integer.value[0];
 	return cs4271_set_deemph(codec);
 }
 
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index ffe9617..911c26c 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -876,11 +876,11 @@
 
 static const struct snd_soc_dapm_route da732x_dapm_routes[] = {
 	/* Inputs */
-	{"AUX1L PGA", "NULL", "AUX1L"},
-	{"AUX1R PGA", "NULL", "AUX1R"},
+	{"AUX1L PGA", NULL, "AUX1L"},
+	{"AUX1R PGA", NULL, "AUX1R"},
 	{"MIC1 PGA", NULL, "MIC1"},
-	{"MIC2 PGA", "NULL", "MIC2"},
-	{"MIC3 PGA", "NULL", "MIC3"},
+	{"MIC2 PGA", NULL, "MIC2"},
+	{"MIC3 PGA", NULL, "MIC3"},
 
 	/* Capture Path */
 	{"ADC1 Left MUX", "MIC1", "MIC1 PGA"},
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index f273251..c5f35a0 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -120,7 +120,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = es8328->deemph;
+	ucontrol->value.integer.value[0] = es8328->deemph;
 	return 0;
 }
 
@@ -129,7 +129,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
-	int deemph = ucontrol->value.enumerated.item[0];
+	int deemph = ucontrol->value.integer.value[0];
 	int ret;
 
 	if (deemph > 1)
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index 1806333..e9e6efb 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -12,9 +12,19 @@
  * max98357a.c -- MAX98357A ALSA SoC Codec driver
  */
 
-#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
 #include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/soc-dapm.h>
 
 #define DRV_NAME "max98357a"
 
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index a722a02..477e13d 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -118,7 +118,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = priv->deemph;
+	ucontrol->value.integer.value[0] = priv->deemph;
 
 	return 0;
 }
@@ -129,7 +129,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
 
-	priv->deemph = ucontrol->value.enumerated.item[0];
+	priv->deemph = ucontrol->value.integer.value[0];
 
 	return pcm1681_set_deemph(codec);
 }
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index f374840..9b541e5 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1198,7 +1198,7 @@
 		.ident = "Dell Dino",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_BOARD_NAME, "0144P8")
+			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
 		}
 	},
 	{ }
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index e1a4a45..fd10261 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -225,7 +225,6 @@
 	case RT5670_ADC_EQ_CTRL1:
 	case RT5670_EQ_CTRL1:
 	case RT5670_ALC_CTRL_1:
-	case RT5670_IRQ_CTRL1:
 	case RT5670_IRQ_CTRL2:
 	case RT5670_INT_IRQ_ST:
 	case RT5670_IL_CMD:
@@ -2703,6 +2702,12 @@
 
 	regmap_write(rt5670->regmap, RT5670_RESET, 0);
 
+	regmap_read(rt5670->regmap, RT5670_VENDOR_ID, &val);
+	if (val >= 4)
+		regmap_write(rt5670->regmap, RT5670_GPIO_CTRL3, 0x0980);
+	else
+		regmap_write(rt5670->regmap, RT5670_GPIO_CTRL3, 0x0d00);
+
 	ret = regmap_register_patch(rt5670->regmap, init_list,
 				    ARRAY_SIZE(init_list));
 	if (ret != 0)
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 5d0bb87..fb9c20e 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -3284,8 +3284,8 @@
 	{ "IB45 Bypass Mux", "Bypass", "IB45 Mux" },
 	{ "IB45 Bypass Mux", "Pass SRC", "IB45 Mux" },
 
-	{ "IB6 Mux", "IF1 DAC 6", "IF1 DAC6" },
-	{ "IB6 Mux", "IF2 DAC 6", "IF2 DAC6" },
+	{ "IB6 Mux", "IF1 DAC 6", "IF1 DAC6 Mux" },
+	{ "IB6 Mux", "IF2 DAC 6", "IF2 DAC6 Mux" },
 	{ "IB6 Mux", "SLB DAC 6", "SLB DAC6" },
 	{ "IB6 Mux", "STO4 ADC MIX L", "Stereo4 ADC MIXL" },
 	{ "IB6 Mux", "IF4 DAC L", "IF4 DAC L" },
@@ -3293,8 +3293,8 @@
 	{ "IB6 Mux", "STO2 ADC MIX L", "Stereo2 ADC MIXL" },
 	{ "IB6 Mux", "STO3 ADC MIX L", "Stereo3 ADC MIXL" },
 
-	{ "IB7 Mux", "IF1 DAC 7", "IF1 DAC7" },
-	{ "IB7 Mux", "IF2 DAC 7", "IF2 DAC7" },
+	{ "IB7 Mux", "IF1 DAC 7", "IF1 DAC7 Mux" },
+	{ "IB7 Mux", "IF2 DAC 7", "IF2 DAC7 Mux" },
 	{ "IB7 Mux", "SLB DAC 7", "SLB DAC7" },
 	{ "IB7 Mux", "STO4 ADC MIX R", "Stereo4 ADC MIXR" },
 	{ "IB7 Mux", "IF4 DAC R", "IF4 DAC R" },
@@ -3635,15 +3635,15 @@
 	{ "DAC1 FS", NULL, "DAC1 MIXL" },
 	{ "DAC1 FS", NULL, "DAC1 MIXR" },
 
-	{ "DAC2 L Mux", "IF1 DAC 2", "IF1 DAC2" },
-	{ "DAC2 L Mux", "IF2 DAC 2", "IF2 DAC2" },
+	{ "DAC2 L Mux", "IF1 DAC 2", "IF1 DAC2 Mux" },
+	{ "DAC2 L Mux", "IF2 DAC 2", "IF2 DAC2 Mux" },
 	{ "DAC2 L Mux", "IF3 DAC L", "IF3 DAC L" },
 	{ "DAC2 L Mux", "IF4 DAC L", "IF4 DAC L" },
 	{ "DAC2 L Mux", "SLB DAC 2", "SLB DAC2" },
 	{ "DAC2 L Mux", "OB 2", "OutBound2" },
 
-	{ "DAC2 R Mux", "IF1 DAC 3", "IF1 DAC3" },
-	{ "DAC2 R Mux", "IF2 DAC 3", "IF2 DAC3" },
+	{ "DAC2 R Mux", "IF1 DAC 3", "IF1 DAC3 Mux" },
+	{ "DAC2 R Mux", "IF2 DAC 3", "IF2 DAC3 Mux" },
 	{ "DAC2 R Mux", "IF3 DAC R", "IF3 DAC R" },
 	{ "DAC2 R Mux", "IF4 DAC R", "IF4 DAC R" },
 	{ "DAC2 R Mux", "SLB DAC 3", "SLB DAC3" },
@@ -3651,29 +3651,29 @@
 	{ "DAC2 R Mux", "Haptic Generator", "Haptic Generator" },
 	{ "DAC2 R Mux", "VAD ADC", "VAD ADC Mux" },
 
-	{ "DAC3 L Mux", "IF1 DAC 4", "IF1 DAC4" },
-	{ "DAC3 L Mux", "IF2 DAC 4", "IF2 DAC4" },
+	{ "DAC3 L Mux", "IF1 DAC 4", "IF1 DAC4 Mux" },
+	{ "DAC3 L Mux", "IF2 DAC 4", "IF2 DAC4 Mux" },
 	{ "DAC3 L Mux", "IF3 DAC L", "IF3 DAC L" },
 	{ "DAC3 L Mux", "IF4 DAC L", "IF4 DAC L" },
 	{ "DAC3 L Mux", "SLB DAC 4", "SLB DAC4" },
 	{ "DAC3 L Mux", "OB 4", "OutBound4" },
 
-	{ "DAC3 R Mux", "IF1 DAC 5", "IF1 DAC4" },
-	{ "DAC3 R Mux", "IF2 DAC 5", "IF2 DAC4" },
+	{ "DAC3 R Mux", "IF1 DAC 5", "IF1 DAC5 Mux" },
+	{ "DAC3 R Mux", "IF2 DAC 5", "IF2 DAC5 Mux" },
 	{ "DAC3 R Mux", "IF3 DAC R", "IF3 DAC R" },
 	{ "DAC3 R Mux", "IF4 DAC R", "IF4 DAC R" },
 	{ "DAC3 R Mux", "SLB DAC 5", "SLB DAC5" },
 	{ "DAC3 R Mux", "OB 5", "OutBound5" },
 
-	{ "DAC4 L Mux", "IF1 DAC 6", "IF1 DAC6" },
-	{ "DAC4 L Mux", "IF2 DAC 6", "IF2 DAC6" },
+	{ "DAC4 L Mux", "IF1 DAC 6", "IF1 DAC6 Mux" },
+	{ "DAC4 L Mux", "IF2 DAC 6", "IF2 DAC6 Mux" },
 	{ "DAC4 L Mux", "IF3 DAC L", "IF3 DAC L" },
 	{ "DAC4 L Mux", "IF4 DAC L", "IF4 DAC L" },
 	{ "DAC4 L Mux", "SLB DAC 6", "SLB DAC6" },
 	{ "DAC4 L Mux", "OB 6", "OutBound6" },
 
-	{ "DAC4 R Mux", "IF1 DAC 7", "IF1 DAC7" },
-	{ "DAC4 R Mux", "IF2 DAC 7", "IF2 DAC7" },
+	{ "DAC4 R Mux", "IF1 DAC 7", "IF1 DAC7 Mux" },
+	{ "DAC4 R Mux", "IF2 DAC 7", "IF2 DAC7 Mux" },
 	{ "DAC4 R Mux", "IF3 DAC R", "IF3 DAC R" },
 	{ "DAC4 R Mux", "IF4 DAC R", "IF4 DAC R" },
 	{ "DAC4 R Mux", "SLB DAC 7", "SLB DAC7" },
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index e182e65..3593a14 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1151,13 +1151,7 @@
 		/* Enable VDDC charge pump */
 		ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
 	} else if (vddio >= 3100 && vdda >= 3100) {
-		/*
-		 * if vddio and vddd > 3.1v,
-		 * charge pump should be clean before set ana_pwr
-		 */
-		snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
-				SGTL5000_VDDC_CHRGPMP_POWERUP, 0);
-
+		ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
 		/* VDDC use VDDIO rail */
 		lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
 		lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index 47b257e..82095d6c 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -538,8 +538,8 @@
 	/* speaker map */
 	{ "IHFOUTL", NULL, "Speaker Rail"},
 	{ "IHFOUTR", NULL, "Speaker Rail"},
-	{ "IHFOUTL", "NULL", "Speaker Left Playback"},
-	{ "IHFOUTR", "NULL", "Speaker Right Playback"},
+	{ "IHFOUTL", NULL, "Speaker Left Playback"},
+	{ "IHFOUTR", NULL, "Speaker Right Playback"},
 	{ "Speaker Left Playback", NULL, "Speaker Left Filter"},
 	{ "Speaker Right Playback", NULL, "Speaker Right Filter"},
 	{ "Speaker Left Filter", NULL, "IHFDAC Left"},
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 3a1343f..007a0e3 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -106,13 +106,11 @@
 };
 
 static const struct regmap_range sta32x_write_regs_range[] = {
-	regmap_reg_range(STA32X_CONFA,  STA32X_AUTO2),
-	regmap_reg_range(STA32X_C1CFG,  STA32X_FDRC2),
+	regmap_reg_range(STA32X_CONFA,  STA32X_FDRC2),
 };
 
 static const struct regmap_range sta32x_read_regs_range[] = {
-	regmap_reg_range(STA32X_CONFA,  STA32X_AUTO2),
-	regmap_reg_range(STA32X_C1CFG,  STA32X_FDRC2),
+	regmap_reg_range(STA32X_CONFA,  STA32X_FDRC2),
 };
 
 static const struct regmap_range sta32x_volatile_regs_range[] = {
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
index 249ef5c..32942be 100644
--- a/sound/soc/codecs/tas5086.c
+++ b/sound/soc/codecs/tas5086.c
@@ -281,7 +281,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = priv->deemph;
+	ucontrol->value.integer.value[0] = priv->deemph;
 
 	return 0;
 }
@@ -292,7 +292,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
 
-	priv->deemph = ucontrol->value.enumerated.item[0];
+	priv->deemph = ucontrol->value.integer.value[0];
 
 	return tas5086_set_deemph(codec);
 }
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 8d9de49..21d5402 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -610,7 +610,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
 
-	ucontrol->value.enumerated.item[0] = wm2000->anc_active;
+	ucontrol->value.integer.value[0] = wm2000->anc_active;
 
 	return 0;
 }
@@ -620,7 +620,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
-	int anc_active = ucontrol->value.enumerated.item[0];
+	int anc_active = ucontrol->value.integer.value[0];
 	int ret;
 
 	if (anc_active > 1)
@@ -643,7 +643,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
 
-	ucontrol->value.enumerated.item[0] = wm2000->spk_ena;
+	ucontrol->value.integer.value[0] = wm2000->spk_ena;
 
 	return 0;
 }
@@ -653,7 +653,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
-	int val = ucontrol->value.enumerated.item[0];
+	int val = ucontrol->value.integer.value[0];
 	int ret;
 
 	if (val > 1)
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 098c143..c6d1053 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -125,7 +125,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = wm8731->deemph;
+	ucontrol->value.integer.value[0] = wm8731->deemph;
 
 	return 0;
 }
@@ -135,7 +135,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
-	int deemph = ucontrol->value.enumerated.item[0];
+	int deemph = ucontrol->value.integer.value[0];
 	int ret = 0;
 
 	if (deemph > 1)
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index dde462c..04b04f8 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -442,7 +442,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = wm8903->deemph;
+	ucontrol->value.integer.value[0] = wm8903->deemph;
 
 	return 0;
 }
@@ -452,7 +452,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-	int deemph = ucontrol->value.enumerated.item[0];
+	int deemph = ucontrol->value.integer.value[0];
 	int ret = 0;
 
 	if (deemph > 1)
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index d3b3f57..215e93c 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -525,7 +525,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = wm8904->deemph;
+	ucontrol->value.integer.value[0] = wm8904->deemph;
 	return 0;
 }
 
@@ -534,7 +534,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
-	int deemph = ucontrol->value.enumerated.item[0];
+	int deemph = ucontrol->value.integer.value[0];
 
 	if (deemph > 1)
 		return -EINVAL;
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 1ab2d46..00bec91 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -393,7 +393,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = wm8955->deemph;
+	ucontrol->value.integer.value[0] = wm8955->deemph;
 	return 0;
 }
 
@@ -402,7 +402,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
-	int deemph = ucontrol->value.enumerated.item[0];
+	int deemph = ucontrol->value.integer.value[0];
 
 	if (deemph > 1)
 		return -EINVAL;
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index cf8fecf..3035d98 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -184,7 +184,7 @@
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
 
-	ucontrol->value.enumerated.item[0] = wm8960->deemph;
+	ucontrol->value.integer.value[0] = wm8960->deemph;
 	return 0;
 }
 
@@ -193,7 +193,7 @@
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
-	int deemph = ucontrol->value.enumerated.item[0];
+	int deemph = ucontrol->value.integer.value[0];
 
 	if (deemph > 1)
 		return -EINVAL;
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 9517571..98c9525 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -180,7 +180,7 @@
 	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-	unsigned int val = ucontrol->value.enumerated.item[0];
+	unsigned int val = ucontrol->value.integer.value[0];
 	struct soc_mixer_control *mc =
 		(struct soc_mixer_control *)kcontrol->private_value;
 	unsigned int mixer, mask, shift, old;
@@ -193,7 +193,7 @@
 
 	mutex_lock(&wm9712->lock);
 	old = wm9712->hp_mixer[mixer];
-	if (ucontrol->value.enumerated.item[0])
+	if (ucontrol->value.integer.value[0])
 		wm9712->hp_mixer[mixer] |= mask;
 	else
 		wm9712->hp_mixer[mixer] &= ~mask;
@@ -231,7 +231,7 @@
 	mixer = mc->shift >> 8;
 	shift = mc->shift & 0xff;
 
-	ucontrol->value.enumerated.item[0] =
+	ucontrol->value.integer.value[0] =
 		(wm9712->hp_mixer[mixer] >> shift) & 1;
 
 	return 0;
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 6822291..7955295 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -255,7 +255,7 @@
 	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
 	struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
-	unsigned int val = ucontrol->value.enumerated.item[0];
+	unsigned int val = ucontrol->value.integer.value[0];
 	struct soc_mixer_control *mc =
 		(struct soc_mixer_control *)kcontrol->private_value;
 	unsigned int mixer, mask, shift, old;
@@ -268,7 +268,7 @@
 
 	mutex_lock(&wm9713->lock);
 	old = wm9713->hp_mixer[mixer];
-	if (ucontrol->value.enumerated.item[0])
+	if (ucontrol->value.integer.value[0])
 		wm9713->hp_mixer[mixer] |= mask;
 	else
 		wm9713->hp_mixer[mixer] &= ~mask;
@@ -306,7 +306,7 @@
 	mixer = mc->shift >> 8;
 	shift = mc->shift & 0xff;
 
-	ucontrol->value.enumerated.item[0] =
+	ucontrol->value.integer.value[0] =
 		(wm9713->hp_mixer[mixer] >> shift) & 1;
 
 	return 0;
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 75870c0..91eb3ae 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -1049,7 +1049,7 @@
 				enum spdif_txrate index, bool round)
 {
 	const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 };
-	bool is_sysclk = clk == spdif_priv->sysclk;
+	bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk);
 	u64 rate_ideal, rate_actual, sub;
 	u32 sysclk_dfmin, sysclk_dfmax;
 	u32 txclk_df, sysclk_df, arate;
@@ -1143,7 +1143,7 @@
 			spdif_priv->txclk_src[index], rate[index]);
 	dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n",
 			spdif_priv->txclk_df[index], rate[index]);
-	if (spdif_priv->txclk[index] == spdif_priv->sysclk)
+	if (clk_is_match(spdif_priv->txclk[index], spdif_priv->sysclk))
 		dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n",
 				spdif_priv->sysclk_df[index], rate[index]);
 	dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n",
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 2595611..6b0c8f7 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -603,17 +603,20 @@
 	factor = (div2 + 1) * (7 * psr + 1) * 2;
 
 	for (i = 0; i < 255; i++) {
-		/* The bclk rate must be smaller than 1/5 sysclk rate */
-		if (factor * (i + 1) < 5)
-			continue;
-
-		tmprate = freq * factor * (i + 2);
+		tmprate = freq * factor * (i + 1);
 
 		if (baudclk_is_used)
 			clkrate = clk_get_rate(ssi_private->baudclk);
 		else
 			clkrate = clk_round_rate(ssi_private->baudclk, tmprate);
 
+		/*
+		 * Hardware limitation: the bclk rate must never be
+		 * greater than 1/5 of the IPG clock rate
+		 */
+		if (clkrate * 5 > clk_get_rate(ssi_private->clk))
+			continue;
+
 		clkrate /= factor;
 		afreq = clkrate / (i + 1);
 
@@ -1224,7 +1227,7 @@
 	ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
 	ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
 
-	ret = !of_property_read_u32_array(np, "dmas", dmas, 4);
+	ret = of_property_read_u32_array(np, "dmas", dmas, 4);
 	if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
 		ssi_private->use_dual_fifo = true;
 		/* When using dual fifo mode, we need to keep watermark
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index f7c6734..fb550b5 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -372,6 +372,11 @@
 			    strlen(dai_link->cpu_dai_name)   +
 			    strlen(dai_link->codec_dai_name) + 2,
 			    GFP_KERNEL);
+	if (!name) {
+		ret = -ENOMEM;
+		goto dai_link_of_err;
+	}
+
 	sprintf(name, "%s-%s", dai_link->cpu_dai_name,
 				dai_link->codec_dai_name);
 	dai_link->name = dai_link->stream_name = name;
diff --git a/sound/soc/intel/sst-atom-controls.h b/sound/soc/intel/sst-atom-controls.h
index dfebfdd..daecc58 100644
--- a/sound/soc/intel/sst-atom-controls.h
+++ b/sound/soc/intel/sst-atom-controls.h
@@ -150,7 +150,7 @@
 
 enum sst_task {
 	SST_TASK_SBA = 1,
-	SST_TASK_MMX,
+	SST_TASK_MMX = 3,
 };
 
 enum sst_type {
diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c
index c42ffae..402b728 100644
--- a/sound/soc/intel/sst-haswell-dsp.c
+++ b/sound/soc/intel/sst-haswell-dsp.c
@@ -207,9 +207,6 @@
 		module = (void *)module + sizeof(*module) + module->mod_size;
 	}
 
-	/* allocate scratch mem regions */
-	sst_block_alloc_scratch(dsp);
-
 	return 0;
 }
 
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index 394af56..863a9ca 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -1732,6 +1732,7 @@
 int sst_hsw_dsp_load(struct sst_hsw *hsw)
 {
 	struct sst_dsp *dsp = hsw->dsp;
+	struct sst_fw *sst_fw, *t;
 	int ret;
 
 	dev_dbg(hsw->dev, "loading audio DSP....");
@@ -1748,12 +1749,17 @@
 		return ret;
 	}
 
-	ret = sst_fw_reload(hsw->sst_fw);
-	if (ret < 0) {
-		dev_err(hsw->dev, "error: SST FW reload failed\n");
-		sst_dsp_dma_put_channel(dsp);
-		return -ENOMEM;
+	list_for_each_entry_safe_reverse(sst_fw, t, &dsp->fw_list, list) {
+		ret = sst_fw_reload(sst_fw);
+		if (ret < 0) {
+			dev_err(hsw->dev, "error: SST FW reload failed\n");
+			sst_dsp_dma_put_channel(dsp);
+			return -ENOMEM;
+		}
 	}
+	ret = sst_block_alloc_scratch(hsw->dsp);
+	if (ret < 0)
+		return -EINVAL;
 
 	sst_dsp_dma_put_channel(dsp);
 	return 0;
@@ -1809,12 +1815,17 @@
 
 int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw)
 {
-	sst_fw_unload(hsw->sst_fw);
-	sst_block_free_scratch(hsw->dsp);
+	struct sst_fw *sst_fw, *t;
+	struct sst_dsp *dsp = hsw->dsp;
+
+	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
+		sst_fw_unload(sst_fw);
+	}
+	sst_block_free_scratch(dsp);
 
 	hsw->boot_complete = false;
 
-	sst_dsp_sleep(hsw->dsp);
+	sst_dsp_sleep(dsp);
 
 	return 0;
 }
@@ -1943,6 +1954,11 @@
 		goto fw_err;
 	}
 
+	/* allocate scratch mem regions */
+	ret = sst_block_alloc_scratch(hsw->dsp);
+	if (ret < 0)
+		goto boot_err;
+
 	/* wait for DSP boot completion */
 	sst_dsp_boot(hsw->dsp);
 	ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete,
diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
index d6fa9d5..7e21e8f 100644
--- a/sound/soc/intel/sst-haswell-pcm.c
+++ b/sound/soc/intel/sst-haswell-pcm.c
@@ -91,7 +91,8 @@
 				  SNDRV_PCM_INFO_INTERLEAVED |
 				  SNDRV_PCM_INFO_PAUSE |
 				  SNDRV_PCM_INFO_RESUME |
-				  SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+				  SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
+				  SNDRV_PCM_INFO_DRAIN_TRIGGER,
 	.formats		= SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
 				  SNDRV_PCM_FMTBIT_S32_LE,
 	.period_bytes_min	= PAGE_SIZE,
diff --git a/sound/soc/intel/sst/sst.c b/sound/soc/intel/sst/sst.c
index 8a8d56a..11c5786 100644
--- a/sound/soc/intel/sst/sst.c
+++ b/sound/soc/intel/sst/sst.c
@@ -350,7 +350,9 @@
 
 	spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
 
-	shim_regs->imrx = sst_shim_read64(shim, SST_IMRX),
+	shim_regs->imrx = sst_shim_read64(shim, SST_IMRX);
+	shim_regs->csr = sst_shim_read64(shim, SST_CSR);
+
 
 	spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
 }
@@ -367,6 +369,7 @@
 	 */
 	spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
 	sst_shim_write64(shim, SST_IMRX, shim_regs->imrx),
+	sst_shim_write64(shim, SST_CSR, shim_regs->csr),
 	spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
 }
 
@@ -379,6 +382,10 @@
 	 * initially active. So change the state to active before
 	 * enabling the pm
 	 */
+
+	if (!acpi_disabled)
+		pm_runtime_set_active(ctx->dev);
+
 	pm_runtime_enable(ctx->dev);
 
 	if (acpi_disabled)
@@ -409,6 +416,7 @@
 	synchronize_irq(ctx->irq_num);
 	flush_workqueue(ctx->post_msg_wq);
 
+	ctx->ops->reset(ctx);
 	/* save the shim registers because PMC doesn't save state */
 	sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
 
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index def7d82..d194830 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -579,7 +579,7 @@
 		if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 	} else {
-		if (priv->extclk == priv->clk) {
+		if (clk_is_match(priv->extclk, priv->clk)) {
 			devm_clk_put(&pdev->dev, priv->extclk);
 			priv->extclk = ERR_PTR(-EINVAL);
 		} else {
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index ccfb41c..f7eb42a 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -352,6 +352,9 @@
 		return ret;
 
 	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+	if (!card)
+		return -ENOMEM;
+
 	card->name = devm_kasprintf(dev, GFP_KERNEL,
 				    "HDMI %s", dev_name(ad->dssdev));
 	card->owner = THIS_MODULE;
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index c7eb9dd..fd99d89 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -530,8 +530,19 @@
 
 	case OMAP_MCBSP_SYSCLK_CLKX_EXT:
 		regs->srgr2	|= CLKSM;
+		regs->pcr0	|= SCLKME;
+		/*
+		 * If McBSP is master yet the CLKX/CLKR pin drives the SRG,
+		 * disable output on those pins. This makes it possible to inject
+		 * the reference clock through CLKX/CLKR. For this to work,
+		 * set_dai_sysclk() _needs_ to be called after set_dai_fmt().
+		 */
+		regs->pcr0	&= ~CLKXM;
+		break;
 	case OMAP_MCBSP_SYSCLK_CLKR_EXT:
 		regs->pcr0	|= SCLKME;
+		/* Disable output on CLKR pin in master mode */
+		regs->pcr0	&= ~CLKRM;
 		break;
 	default:
 		err = -ENODEV;
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index f4b05bc..1343ecb 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -201,7 +201,7 @@
 	struct snd_pcm *pcm = rtd->pcm;
 	int ret;
 
-	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
+	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;
 
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 3cebf6c..0632a36 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -174,7 +174,7 @@
 
 config SND_SOC_SPEYSIDE
 	tristate "Audio support for Wolfson Speyside"
-	depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410
+	depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && I2C && SPI_MASTER
 	select SND_SAMSUNG_I2S
 	select SND_SOC_WM8996
 	select SND_SOC_WM9081
@@ -189,7 +189,7 @@
 
 config SND_SOC_BELLS
 	tristate "Audio support for Wolfson Bells"
-	depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA
+	depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA && I2C && SPI_MASTER
 	select SND_SAMSUNG_I2S
 	select SND_SOC_WM5102
 	select SND_SOC_WM5110
@@ -206,7 +206,7 @@
 
 config SND_SOC_LITTLEMILL
 	tristate "Audio support for Wolfson Littlemill"
-	depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410
+	depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && I2C
 	select SND_SAMSUNG_I2S
 	select MFD_WM8994
 	select SND_SOC_WM8994
@@ -223,7 +223,7 @@
 
 config SND_SOC_ODROIDX2
 	tristate "Audio support for Odroid-X2 and Odroid-U3"
-	depends on SND_SOC_SAMSUNG
+	depends on SND_SOC_SAMSUNG && I2C
 	select SND_SOC_MAX98090
 	select SND_SAMSUNG_I2S
 	help
@@ -231,6 +231,6 @@
 
 config SND_SOC_ARNDALE_RT5631_ALC5631
         tristate "Audio support for RT5631(ALC5631) on Arndale Board"
-        depends on SND_SOC_SAMSUNG
+        depends on SND_SOC_SAMSUNG && I2C
         select SND_SAMSUNG_I2S
         select SND_SOC_RT5631
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 1b53605..110577c5 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1252,6 +1252,8 @@
 			goto exit_snd_probe;
 	}
 
+	dev_set_drvdata(dev, priv);
+
 	/*
 	 *	asoc register
 	 */
@@ -1268,8 +1270,6 @@
 		goto exit_snd_soc;
 	}
 
-	dev_set_drvdata(dev, priv);
-
 	pm_runtime_enable(dev);
 
 	dev_info(dev, "probed\n");
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 30579ca..e5c9908 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -347,6 +347,8 @@
 	if (!buf)
 		return -ENOMEM;
 
+	mutex_lock(&client_mutex);
+
 	list_for_each_entry(codec, &codec_list, list) {
 		len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
 			       codec->component.name);
@@ -358,6 +360,8 @@
 		}
 	}
 
+	mutex_unlock(&client_mutex);
+
 	if (ret >= 0)
 		ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 
@@ -382,6 +386,8 @@
 	if (!buf)
 		return -ENOMEM;
 
+	mutex_lock(&client_mutex);
+
 	list_for_each_entry(component, &component_list, list) {
 		list_for_each_entry(dai, &component->dai_list, list) {
 			len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
@@ -395,6 +401,8 @@
 		}
 	}
 
+	mutex_unlock(&client_mutex);
+
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 
 	kfree(buf);
@@ -418,6 +426,8 @@
 	if (!buf)
 		return -ENOMEM;
 
+	mutex_lock(&client_mutex);
+
 	list_for_each_entry(platform, &platform_list, list) {
 		len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
 			       platform->component.name);
@@ -429,6 +439,8 @@
 		}
 	}
 
+	mutex_unlock(&client_mutex);
+
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 
 	kfree(buf);
@@ -836,6 +848,8 @@
 {
 	struct snd_soc_component *component;
 
+	lockdep_assert_held(&client_mutex);
+
 	list_for_each_entry(component, &component_list, list) {
 		if (of_node) {
 			if (component->dev->of_node == of_node)
@@ -854,6 +868,8 @@
 	struct snd_soc_component *component;
 	struct snd_soc_dai *dai;
 
+	lockdep_assert_held(&client_mutex);
+
 	/* Find CPU DAI from registered DAIs*/
 	list_for_each_entry(component, &component_list, list) {
 		if (dlc->of_node && component->dev->of_node != dlc->of_node)
@@ -1508,6 +1524,7 @@
 	struct snd_soc_codec *codec;
 	int ret, i, order;
 
+	mutex_lock(&client_mutex);
 	mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
 
 	/* bind DAIs */
@@ -1662,6 +1679,7 @@
 	card->instantiated = 1;
 	snd_soc_dapm_sync(&card->dapm);
 	mutex_unlock(&card->mutex);
+	mutex_unlock(&client_mutex);
 
 	return 0;
 
@@ -1680,6 +1698,7 @@
 
 base_error:
 	mutex_unlock(&card->mutex);
+	mutex_unlock(&client_mutex);
 
 	return ret;
 }
@@ -2713,13 +2732,6 @@
 	list_del(&component->list);
 }
 
-static void snd_soc_component_del(struct snd_soc_component *component)
-{
-	mutex_lock(&client_mutex);
-	snd_soc_component_del_unlocked(component);
-	mutex_unlock(&client_mutex);
-}
-
 int snd_soc_register_component(struct device *dev,
 			       const struct snd_soc_component_driver *cmpnt_drv,
 			       struct snd_soc_dai_driver *dai_drv,
@@ -2767,14 +2779,17 @@
 {
 	struct snd_soc_component *cmpnt;
 
+	mutex_lock(&client_mutex);
 	list_for_each_entry(cmpnt, &component_list, list) {
 		if (dev == cmpnt->dev && cmpnt->registered_as_component)
 			goto found;
 	}
+	mutex_unlock(&client_mutex);
 	return;
 
 found:
-	snd_soc_component_del(cmpnt);
+	snd_soc_component_del_unlocked(cmpnt);
+	mutex_unlock(&client_mutex);
 	snd_soc_component_cleanup(cmpnt);
 	kfree(cmpnt);
 }
@@ -2882,10 +2897,14 @@
 {
 	struct snd_soc_platform *platform;
 
+	mutex_lock(&client_mutex);
 	list_for_each_entry(platform, &platform_list, list) {
-		if (dev == platform->dev)
+		if (dev == platform->dev) {
+			mutex_unlock(&client_mutex);
 			return platform;
+		}
 	}
+	mutex_unlock(&client_mutex);
 
 	return NULL;
 }
@@ -3090,15 +3109,15 @@
 {
 	struct snd_soc_codec *codec;
 
+	mutex_lock(&client_mutex);
 	list_for_each_entry(codec, &codec_list, list) {
 		if (dev == codec->dev)
 			goto found;
 	}
+	mutex_unlock(&client_mutex);
 	return;
 
 found:
-
-	mutex_lock(&client_mutex);
 	list_del(&codec->list);
 	snd_soc_component_del_unlocked(&codec->component);
 	mutex_unlock(&client_mutex);
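
Note: the soc-core changes above take client_mutex around every traversal of the codec/component/platform lists and add lockdep_assert_held() to the lookup helpers so unlocked callers are flagged when lockdep is enabled. A small sketch of that pattern with illustrative names (example_component, lookup_component, find_component):

	#include <linux/device.h>
	#include <linux/list.h>
	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(client_mutex);	/* guards the example list */
	static LIST_HEAD(component_list);

	struct example_component {
		struct list_head list;
		struct device *dev;
	};

	/* lookup helper: callers must already hold client_mutex */
	static struct example_component *lookup_component(struct device *dev)
	{
		struct example_component *c;

		lockdep_assert_held(&client_mutex);	/* warns if the lock is not held */

		list_for_each_entry(c, &component_list, list) {
			if (c->dev == dev)
				return c;
		}
		return NULL;
	}

	static struct example_component *find_component(struct device *dev)
	{
		struct example_component *c;

		mutex_lock(&client_mutex);
		c = lookup_component(dev);
		mutex_unlock(&client_mutex);
		return c;
	}
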
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 03fed66..2ed260b 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -303,6 +303,11 @@
 		return err;
 	}
 
+	/* Don't check the sample rate for devices which we know don't
+	 * support reading */
+	if (snd_usb_get_sample_rate_quirk(chip))
+		return 0;
+
 	if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
 				   USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
 				   UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 99b63a7..81b7da8 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -302,14 +302,17 @@
 /*
 	Read data from device.
 */
-int line6_read_data(struct usb_line6 *line6, int address, void *data,
-		    size_t datalen)
+int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
+		    unsigned datalen)
 {
 	struct usb_device *usbdev = line6->usbdev;
 	int ret;
 	unsigned char len;
 	unsigned count;
 
+	if (address > 0xffff || datalen > 0xff)
+		return -EINVAL;
+
 	/* query the serial number: */
 	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
 			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -370,14 +373,17 @@
 /*
 	Write data to device.
 */
-int line6_write_data(struct usb_line6 *line6, int address, void *data,
-		     size_t datalen)
+int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
+		     unsigned datalen)
 {
 	struct usb_device *usbdev = line6->usbdev;
 	int ret;
 	unsigned char status;
 	int count;
 
+	if (address > 0xffff || datalen > 0xffff)
+		return -EINVAL;
+
 	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
 			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
 			      0x0022, address, data, datalen,
diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h
index 5d20294..7da643e 100644
--- a/sound/usb/line6/driver.h
+++ b/sound/usb/line6/driver.h
@@ -147,8 +147,8 @@
 
 extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1,
 				      int code2, int size);
-extern int line6_read_data(struct usb_line6 *line6, int address, void *data,
-			   size_t datalen);
+extern int line6_read_data(struct usb_line6 *line6, unsigned address,
+			   void *data, unsigned datalen);
 extern int line6_read_serial_number(struct usb_line6 *line6,
 				    u32 *serial_number);
 extern int line6_send_raw_message_async(struct usb_line6 *line6,
@@ -161,8 +161,8 @@
 			      void (*function)(unsigned long),
 			      unsigned long data);
 extern int line6_version_request_async(struct usb_line6 *line6);
-extern int line6_write_data(struct usb_line6 *line6, int address, void *data,
-			    size_t datalen);
+extern int line6_write_data(struct usb_line6 *line6, unsigned address,
+			    void *data, unsigned datalen);
 
 int line6_probe(struct usb_interface *interface,
 		const struct usb_device_id *id,
diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c
index 05dee69..97ed593 100644
--- a/sound/usb/line6/playback.c
+++ b/sound/usb/line6/playback.c
@@ -39,7 +39,7 @@
 		for (; p < buf_end; ++p) {
 			short pv = le16_to_cpu(*p);
 			int val = (pv * volume[chn & 1]) >> 8;
-			pv = clamp(val, 0x7fff, -0x8000);
+			pv = clamp(val, -0x8000, 0x7fff);
 			*p = cpu_to_le16(pv);
 			++chn;
 		}
@@ -54,7 +54,7 @@
 
 			val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16);
 			val = (val * volume[chn & 1]) >> 8;
-			val = clamp(val, 0x7fffff, -0x800000);
+			val = clamp(val, -0x800000, 0x7fffff);
 			p[0] = val;
 			p[1] = val >> 8;
 			p[2] = val >> 16;
@@ -126,7 +126,7 @@
 			short pov = le16_to_cpu(*po);
 			short piv = le16_to_cpu(*pi);
 			int val = pov + ((piv * volume) >> 8);
-			pov = clamp(val, 0x7fff, -0x8000);
+			pov = clamp(val, -0x8000, 0x7fff);
 			*po = cpu_to_le16(pov);
 		}
 	}
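
Note: the three hunks above fix clamp() calls whose bounds were passed in (high, low) order; the kernel macro expects clamp(val, lo, hi), so with the bounds swapped every sample degenerated to -0x8000. A standalone illustration of the corrected S16 clamping (the clamp macro below is just a userspace stand-in for the kernel's):

	#include <stdio.h>

	/* userspace stand-in for the kernel's clamp(val, lo, hi) */
	#define clamp(val, lo, hi) ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

	int main(void)
	{
		int samples[] = { 40000, -40000, 1234 };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
			/* bounds in (low, high) order: -0x8000..0x7fff for S16 samples */
			short s = clamp(samples[i], -0x8000, 0x7fff);

			printf("%6d -> %6d\n", samples[i], s);
		}
		return 0;
	}
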
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 67d4765..07f984d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1773,6 +1773,36 @@
 		}
 	}
 },
+{
+	USB_DEVICE(0x0582, 0x0159),
+	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+		/* .vendor_name = "Roland", */
+		/* .product_name = "UA-22", */
+		.ifnum = QUIRK_ANY_INTERFACE,
+		.type = QUIRK_COMPOSITE,
+		.data = (const struct snd_usb_audio_quirk[]) {
+			{
+				.ifnum = 0,
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+			},
+			{
+				.ifnum = 1,
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
+			},
+			{
+				.ifnum = 2,
+				.type = QUIRK_MIDI_FIXED_ENDPOINT,
+				.data = & (const struct snd_usb_midi_endpoint_info) {
+					.out_cables = 0x0001,
+					.in_cables = 0x0001
+				}
+			},
+			{
+				.ifnum = -1
+			}
+		}
+	}
+},
 /* this catches most recent vendor-specific Roland devices */
 {
 	.match_flags = USB_DEVICE_ID_MATCH_VENDOR |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index a739841..753a47d 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1111,6 +1111,11 @@
 	}
 }
 
+bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+{
+	/* MS Lifecam HD-5000 doesn't support reading the sample rate. */
+	return chip->usb_id == USB_ID(0x045E, 0x076D);
+}
 
 /* Marantz/Denon USB DACs need a vendor cmd to switch
  * between PCM and native DSD mode
@@ -1122,6 +1127,7 @@
 	int err;
 
 	switch (subs->stream->chip->usb_id) {
+	case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
 	case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
 	case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
 
@@ -1201,6 +1207,7 @@
 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
 
 		switch (le16_to_cpu(dev->descriptor.idProduct)) {
+		case 0x1003: /* Denon DA300-USB */
 		case 0x3005: /* Marantz HD-DAC1 */
 		case 0x3006: /* Marantz SA-14S1 */
 			mdelay(20);
@@ -1262,6 +1269,7 @@
 
 	/* Denon/Marantz devices with USB DAC functionality */
 	switch (chip->usb_id) {
+	case USB_ID(0x154e, 0x1003): /* Denon DA300-USB */
 	case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
 	case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
 		if (fp->altsetting == 2)
diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h
index 1b86238..2cd71ed 100644
--- a/sound/usb/quirks.h
+++ b/sound/usb/quirks.h
@@ -21,6 +21,8 @@
 void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
 			      struct audioformat *fmt);
 
+bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip);
+
 int snd_usb_is_big_endian_format(struct snd_usb_audio *chip,
 				 struct audioformat *fp);
 
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 6c14afe..db1d3a2 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -289,7 +289,7 @@
 	memcpy_t fn = r->fn.memcpy;
 	int i;
 
-	memcpy_alloc_mem(&src, &dst, len);
+	memcpy_alloc_mem(&dst, &src, len);
 
 	if (prefault)
 		fn(dst, src, len);
@@ -312,7 +312,7 @@
 	void *src = NULL, *dst = NULL;
 	int i;
 
-	memcpy_alloc_mem(&src, &dst, len);
+	memcpy_alloc_mem(&dst, &src, len);
 
 	if (prefault)
 		fn(dst, src, len);
diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch
index ff95a68..ac8721f 100644
--- a/tools/perf/config/Makefile.arch
+++ b/tools/perf/config/Makefile.arch
@@ -21,6 +21,10 @@
   endif
 endif
 
+ifeq ($(RAW_ARCH),sparc64)
+  ARCH ?= sparc
+endif
+
 ARCH ?= $(RAW_ARCH)
 
 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 42ac05a..b32ff33 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -49,7 +49,7 @@
 	$(BUILD)
 
 test-pthread-attr-setaffinity-np.bin:
-	$(BUILD) -Werror -lpthread
+	$(BUILD) -D_GNU_SOURCE -Werror -lpthread
 
 test-stackprotector-all.bin:
 	$(BUILD) -Werror -fstack-protector-all
diff --git a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
index 0a0d3ec..2b81b72 100644
--- a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
+++ b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
@@ -5,10 +5,11 @@
 {
 	int ret = 0;
 	pthread_attr_t thread_attr;
+	cpu_set_t cs;
 
 	pthread_attr_init(&thread_attr);
 	/* don't care abt exact args, just the API itself in libpthread */
-	ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
+	ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
 
 	return ret;
 }
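
Note: the feature probe above now passes a real cpu_set_t and builds with -D_GNU_SOURCE, presumably because glibc declares pthread_attr_setaffinity_np() only under _GNU_SOURCE and stricter prototypes can make the old (0, NULL) probe fail under -Werror. For reference, a small standalone example of the call as normally used, pinning a new thread to CPU 0 (build with -lpthread; sched_getcpu() is likewise a GNU extension):

	#define _GNU_SOURCE		/* pthread_attr_setaffinity_np() is a GNU extension */
	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>

	static void *worker(void *arg)
	{
		(void)arg;
		printf("running on CPU %d\n", sched_getcpu());
		return NULL;
	}

	int main(void)
	{
		pthread_t tid;
		pthread_attr_t attr;
		cpu_set_t cs;

		CPU_ZERO(&cs);
		CPU_SET(0, &cs);	/* pin the new thread to CPU 0 */

		pthread_attr_init(&attr);
		pthread_attr_setaffinity_np(&attr, sizeof(cs), &cs);

		pthread_create(&tid, &attr, worker, NULL);
		pthread_join(tid, NULL);
		pthread_attr_destroy(&attr);
		return 0;
	}
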
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 61bf912..9d9db3b 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -30,6 +30,8 @@
 
 static void ins__delete(struct ins_operands *ops)
 {
+	if (ops == NULL)
+		return;
 	zfree(&ops->source.raw);
 	zfree(&ops->source.name);
 	zfree(&ops->target.raw);
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
index 47b78b3..6da965b 100644
--- a/tools/perf/util/cloexec.c
+++ b/tools/perf/util/cloexec.c
@@ -25,6 +25,10 @@
 	if (cpu < 0)
 		cpu = 0;
 
+	/*
+	 * Using -1 for the pid is a workaround to avoid gratuitous jump label
+	 * changes.
+	 */
 	while (1) {
 		/* check cloexec flag */
 		fd = sys_perf_event_open(&attr, pid, cpu, -1,
@@ -47,16 +51,24 @@
 		  err, strerror_r(err, sbuf, sizeof(sbuf)));
 
 	/* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
-	fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+	while (1) {
+		fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
+		if (fd < 0 && pid == -1 && errno == EACCES) {
+			pid = 0;
+			continue;
+		}
+		break;
+	}
 	err = errno;
 
+	if (fd >= 0)
+		close(fd);
+
 	if (WARN_ONCE(fd < 0 && err != EBUSY,
 		      "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
 		      err, strerror_r(err, sbuf, sizeof(sbuf))))
 		return -1;
 
-	close(fd);
-
 	return 0;
 }
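
Note: the cloexec probe above now retries with pid = 0 when the system-wide pid = -1 open is refused with EACCES, and only closes the fd when the open actually succeeded. Roughly the same probe in standalone form (attr fields and PERF_FLAG_FD_CLOEXEC come from the perf_event UAPI header):

	#include <errno.h>
	#include <linux/perf_event.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_CPU_CLOCK;

		/* pid = -1, cpu = 0: system-wide on CPU 0, which may need privileges */
		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
		if (fd < 0 && errno == EACCES)
			/* fall back to counting only this process, like the probe does */
			fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);

		if (fd >= 0) {
			printf("PERF_FLAG_FD_CLOEXEC works\n");
			close(fd);	/* don't leak the probe fd */
		} else {
			printf("perf_event_open with PERF_FLAG_FD_CLOEXEC failed: %d\n", errno);
		}
		return 0;
	}
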
 
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index c94a9e0..e99a676 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -28,7 +28,7 @@
 	int		 mask;
 	int		 refcnt;
 	unsigned int	 prev;
-	char		 event_copy[PERF_SAMPLE_MAX_SIZE];
+	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
 };
 
 struct perf_evlist {
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index b24f9d8..33b7a2a 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -11,6 +11,11 @@
 #include <symbol/kallsyms.h>
 #include "debug.h"
 
+#ifndef EM_AARCH64
+#define EM_AARCH64	183  /* ARM 64 bit */
+#endif
+
+
 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
 extern char *cplus_demangle(const char *, int);
 
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 3ed7c04..2e2ba2e 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -209,7 +209,7 @@
 
 $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
 	$(ECHO) "  CC      " $@
-	$(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@
+	$(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@
 	$(QUIET) $(STRIPCMD) $@
 
 $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC)
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index e238c95..8d5d1d2e 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -30,7 +30,7 @@
 #ifdef __NR_execveat
 	return syscall(__NR_execveat, fd, path, argv, envp, flags);
 #else
-	errno = -ENOSYS;
+	errno = ENOSYS;
 	return -1;
 #endif
 }
@@ -234,6 +234,14 @@
 	int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC);
 	int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC);
 
+	/* Check if we have execveat at all, and bail early if not */
+	errno = 0;
+	execveat_(-1, NULL, NULL, NULL, 0);
+	if (errno == ENOSYS) {
+		printf("[FAIL] ENOSYS calling execveat - no kernel support?\n");
+		return 1;
+	}
+
 	/* Change file position to confirm it doesn't affect anything */
 	lseek(fd, 10, SEEK_SET);
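
Note: the first execveat hunk above changes the stub path to set errno = ENOSYS rather than -ENOSYS; errno values are positive constants, and a failing syscall wrapper reports the error by returning -1 and leaving the positive code in errno, which is exactly what the new bail-early probe checks. A tiny demonstration of that convention:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		errno = 0;
		if (read(-1, NULL, 0) == -1)
			/* errno holds a positive constant such as EBADF or ENOSYS */
			printf("read failed: errno=%d (%s)\n", errno, strerror(errno));
		return 0;
	}
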
 
diff --git a/tools/thermal/tmon/.gitignore b/tools/thermal/tmon/.gitignore
new file mode 100644
index 0000000..06e96be
--- /dev/null
+++ b/tools/thermal/tmon/.gitignore
@@ -0,0 +1 @@
+/tmon
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index e775adc..0788621 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -2,8 +2,8 @@
 
 BINDIR=usr/bin
 WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int
-CFLAGS= -O1 ${WARNFLAGS} -fstack-protector
-CC=gcc
+CFLAGS+= -O1 ${WARNFLAGS} -fstack-protector
+CC=$(CROSS_COMPILE)gcc
 
 CFLAGS+=-D VERSION=\"$(VERSION)\"
 LDFLAGS+=
@@ -16,12 +16,21 @@
 CONFIG_FILE=
 CONFIG_PATH=
 
+# Static builds might require -ltinfo, for instance
+ifneq ($(findstring -static, $(LDFLAGS)),)
+STATIC := --static
+endif
+
+TMON_LIBS=-lm -lpthread
+TMON_LIBS += $(shell pkg-config --libs $(STATIC) panelw ncursesw 2> /dev/null || \
+		     pkg-config --libs $(STATIC) panel ncurses 2> /dev/null || \
+		     echo -lpanel -lncurses)
 
 OBJS = tmon.o tui.o sysfs.o pid.o
 OBJS +=
 
 tmon: $(OBJS) Makefile tmon.h
-	$(CC) ${CFLAGS} $(LDFLAGS) $(OBJS)  -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread
+	$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS)  -o $(TARGET) $(TMON_LIBS)
 
 valgrind: tmon
 	 sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET)  1> /dev/null
diff --git a/tools/thermal/tmon/tmon.8 b/tools/thermal/tmon/tmon.8
index 0be727c..02d5179 100644
--- a/tools/thermal/tmon/tmon.8
+++ b/tools/thermal/tmon/tmon.8
@@ -55,6 +55,8 @@
 .PP
 The \fB-t --time-interval\fP option sets the polling interval in seconds
 .PP
+The \fB-T --target-temp\fP option sets the initial target temperature
+.PP
 The \fB-v --version\fP option shows the version of \fBtmon \fP
 .PP
 The \fB-z --zone\fP option sets the target thermal zone instance to be controlled
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
index 09b7c32..9aa1965 100644
--- a/tools/thermal/tmon/tmon.c
+++ b/tools/thermal/tmon/tmon.c
@@ -64,6 +64,7 @@
 	printf("  -h, --help            show this help message\n");
 	printf("  -l, --log             log data to /var/tmp/tmon.log\n");
 	printf("  -t, --time-interval   sampling time interval, > 1 sec.\n");
+	printf("  -T, --target-temp     initial target temperature\n");
 	printf("  -v, --version         show version\n");
 	printf("  -z, --zone            target thermal zone id\n");
 
@@ -219,6 +220,7 @@
 	{ "control", 1, NULL, 'c' },
 	{ "daemon", 0, NULL, 'd' },
 	{ "time-interval", 1, NULL, 't' },
+	{ "target-temp", 1, NULL, 'T' },
 	{ "log", 0, NULL, 'l' },
 	{ "help", 0, NULL, 'h' },
 	{ "version", 0, NULL, 'v' },
@@ -231,7 +233,7 @@
 {
 	int err = 0;
 	int id2 = 0, c;
-	double yk = 0.0; /* controller output */
+	double yk = 0.0, temp; /* controller output */
 	int target_tz_index;
 
 	if (geteuid() != 0) {
@@ -239,7 +241,7 @@
 		exit(EXIT_FAILURE);
 	}
 
-	while ((c = getopt_long(argc, argv, "c:dlht:vgz:", opts, &id2)) != -1) {
+	while ((c = getopt_long(argc, argv, "c:dlht:T:vgz:", opts, &id2)) != -1) {
 		switch (c) {
 		case 'c':
 			no_control = 0;
@@ -254,6 +256,14 @@
 			if (ticktime < 1)
 				ticktime = 1;
 			break;
+		case 'T':
+			temp = strtod(optarg, NULL);
+			if (temp < 0) {
+				fprintf(stderr, "error: temperature must be positive\n");
+				return 1;
+			}
+			target_temp_user = temp;
+			break;
 		case 'l':
 			printf("Logging data to /var/tmp/tmon.log\n");
 			logging = 1;
diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c
index 89f8ef0..b5d1c6b 100644
--- a/tools/thermal/tmon/tui.c
+++ b/tools/thermal/tmon/tui.c
@@ -30,6 +30,18 @@
 
 #include "tmon.h"
 
+#define min(x, y) ({				\
+	typeof(x) _min1 = (x);			\
+	typeof(y) _min2 = (y);			\
+	(void) (&_min1 == &_min2);		\
+	_min1 < _min2 ? _min1 : _min2; })
+
+#define max(x, y) ({				\
+	typeof(x) _max1 = (x);			\
+	typeof(y) _max2 = (y);			\
+	(void) (&_max1 == &_max2);		\
+	_max1 > _max2 ? _max1 : _max2; })
+
 static PANEL *data_panel;
 static PANEL *dialogue_panel;
 static PANEL *top;
@@ -98,6 +110,18 @@
 	wrefresh(status_bar_window);
 }
 
+/* wrap at 5 */
+#define DIAG_DEV_ROWS  5
+/*
+ * list cooling devices + "set temp" entry; wraps after 5 rows, if they fit
+ */
+static int diag_dev_rows(void)
+{
+	int entries = ptdata.nr_cooling_dev + 1;
+	int rows = max(DIAG_DEV_ROWS, (entries + 1) / 2);
+	return min(rows, entries);
+}
+
 void setup_windows(void)
 {
 	int y_begin = 1;
@@ -122,7 +146,7 @@
 	 * dialogue window is a pop-up, when needed it lays on top of cdev win
 	 */
 
-	dialogue_window = subwin(stdscr, ptdata.nr_cooling_dev+5, maxx-50,
+	dialogue_window = subwin(stdscr, diag_dev_rows() + 5, maxx-50,
 				DIAG_Y, DIAG_X);
 
 	thermal_data_window = subwin(stdscr, ptdata.nr_tz_sensor *
@@ -258,21 +282,26 @@
 }
 
 const char DIAG_TITLE[] = "[ TUNABLES ]";
-#define DIAG_DEV_ROWS  5
 void show_dialogue(void)
 {
 	int j, x = 0, y = 0;
+	int rows, cols;
 	WINDOW *w = dialogue_window;
 
 	if (tui_disabled || !w)
 		return;
 
+	getmaxyx(w, rows, cols);
+
+	/* Silence compiler 'unused' warnings */
+	(void)cols;
+
 	werase(w);
 	box(w, 0, 0);
 	mvwprintw(w, 0, maxx/4, DIAG_TITLE);
 	/* list all the available tunables */
 	for (j = 0; j <= ptdata.nr_cooling_dev; j++) {
-		y = j % DIAG_DEV_ROWS;
+		y = j % diag_dev_rows();
 		if (y == 0 && j != 0)
 			x += 20;
 		if (j == ptdata.nr_cooling_dev)
@@ -283,12 +312,10 @@
 				ptdata.cdi[j].type, ptdata.cdi[j].instance);
 	}
 	wattron(w, A_BOLD);
-	mvwprintw(w, DIAG_DEV_ROWS+1, 1, "Enter Choice [A-Z]?");
+	mvwprintw(w, diag_dev_rows()+1, 1, "Enter Choice [A-Z]?");
 	wattroff(w, A_BOLD);
-	/* y size of dialogue win is nr cdev + 5, so print legend
-	 * at the bottom line
-	 */
-	mvwprintw(w, ptdata.nr_cooling_dev+3, 1,
+	/* print legend at the bottom line */
+	mvwprintw(w, rows - 2, 1,
 		"Legend: A=Active, P=Passive, C=Critical");
 
 	wrefresh(dialogue_window);
@@ -437,7 +464,7 @@
 			snprintf(buf, sizeof(buf), "New Value for %.10s-%2d: ",
 				ptdata.cdi[cdev_id].type,
 				ptdata.cdi[cdev_id].instance);
-		write_dialogue_win(buf, DIAG_DEV_ROWS+2, 2);
+		write_dialogue_win(buf, diag_dev_rows() + 2, 2);
 		handle_input_val(cdev_id);
 	} else {
 		snprintf(buf, sizeof(buf), "Invalid selection %d", ch);
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index a0a7b5d..f9b9c7c 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -72,6 +72,8 @@
 {
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
+	else
+		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -84,6 +86,11 @@
 	return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }
 
+static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
+}
+
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
 	u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
@@ -148,6 +155,7 @@
 	.sync_lr_elrsr		= vgic_v2_sync_lr_elrsr,
 	.get_elrsr		= vgic_v2_get_elrsr,
 	.get_eisr		= vgic_v2_get_eisr,
+	.clear_eisr		= vgic_v2_clear_eisr,
 	.get_interrupt_status	= vgic_v2_get_interrupt_status,
 	.enable_underflow	= vgic_v2_enable_underflow,
 	.disable_underflow	= vgic_v2_disable_underflow,
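
Before this change vgic_v2_sync_lr_elrsr() only ever set the "empty" bit, so a list register that was refilled could keep advertising itself as empty; the new else branch clears the bit again. A stand-alone sketch of that pattern, under the assumption that a plain uint64_t bitmap and a 2-bit state mask are a fair stand-in for vgic_elrsr and LR_STATE_MASK (hypothetical code, not KVM's):

#include <stdint.h>
#include <stdio.h>

#define STATE_MASK	0x3	/* stands in for LR_STATE_MASK */

static uint64_t elrsr;		/* bit n set => list register n is empty */

static void sync_slot(unsigned int lr, uint32_t state)
{
	if (!(state & STATE_MASK))
		elrsr |= (1ULL << lr);	/* nothing pending/active: mark empty */
	else
		elrsr &= ~(1ULL << lr);	/* slot in use again: clear the bit */
}

int main(void)
{
	sync_slot(3, 0x0);	/* LR 3 drained: bit 3 set */
	sync_slot(3, 0x1);	/* LR 3 holds an interrupt again: bit 3 cleared */
	printf("elrsr = %#llx\n", (unsigned long long)elrsr);
	return 0;
}
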
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 3a62d8a..dff0602 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -104,6 +104,8 @@
 {
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
+	else
+		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr);
 }
 
 static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -116,6 +118,11 @@
 	return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
 }
 
+static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0;
+}
+
 static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
 	u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
@@ -192,6 +199,7 @@
 	.sync_lr_elrsr		= vgic_v3_sync_lr_elrsr,
 	.get_elrsr		= vgic_v3_get_elrsr,
 	.get_eisr		= vgic_v3_get_eisr,
+	.clear_eisr		= vgic_v3_clear_eisr,
 	.get_interrupt_status	= vgic_v3_get_interrupt_status,
 	.enable_underflow	= vgic_v3_enable_underflow,
 	.disable_underflow	= vgic_v3_disable_underflow,
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 0cc6ab6..c9f60f5 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -883,6 +883,11 @@
 	return vgic_ops->get_eisr(vcpu);
 }
 
+static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vgic_ops->clear_eisr(vcpu);
+}
+
 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
 {
 	return vgic_ops->get_interrupt_status(vcpu);
@@ -922,6 +927,7 @@
 	vgic_set_lr(vcpu, lr_nr, vlr);
 	clear_bit(lr_nr, vgic_cpu->lr_used);
 	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -978,6 +984,7 @@
 			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
 			vlr.state |= LR_STATE_PENDING;
 			vgic_set_lr(vcpu, lr, vlr);
+			vgic_sync_lr_elrsr(vcpu, lr, vlr);
 			return true;
 		}
 	}
@@ -999,6 +1006,7 @@
 		vlr.state |= LR_EOI_INT;
 
 	vgic_set_lr(vcpu, lr, vlr);
+	vgic_sync_lr_elrsr(vcpu, lr, vlr);
 
 	return true;
 }
@@ -1136,6 +1144,14 @@
 	if (status & INT_STATUS_UNDERFLOW)
 		vgic_disable_underflow(vcpu);
 
+	/*
+	 * In the next iterations of the vcpu loop, if we sync the vgic state
+	 * after flushing it, but before entering the guest (this happens for
+	 * pending signals and vmid rollovers), then make sure we don't pick
+	 * up any old maintenance interrupts here.
+	 */
+	vgic_clear_eisr(vcpu);
+
 	return level_pending;
 }
 
@@ -1583,8 +1599,10 @@
 	 * emulation. So check this here again. KVM_CREATE_DEVICE does
 	 * the proper checks already.
 	 */
-	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2)
-		return -ENODEV;
+	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	/*
 	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
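
vgic_clear_eisr() in the generic code reaches the v2/v3 implementations through the vgic_ops function-pointer table, the same way the neighbouring get_eisr/get_elrsr hooks do. A minimal, self-contained sketch of that dispatch pattern, using hypothetical struct and function names rather than the KVM ones:

#include <stdio.h>

struct dev;

struct dev_ops {
	void (*clear_eisr)(struct dev *d);
};

struct dev {
	const struct dev_ops *ops;
	unsigned int eisr;
};

static void backend_clear_eisr(struct dev *d)
{
	d->eisr = 0;	/* forget stale "EOI'd" status */
}

static const struct dev_ops backend_ops = {
	.clear_eisr = backend_clear_eisr,
};

static inline void dev_clear_eisr(struct dev *d)
{
	d->ops->clear_eisr(d);	/* generic wrapper, like vgic_clear_eisr() */
}

int main(void)
{
	struct dev d = { .ops = &backend_ops, .eisr = 0xff };

	dev_clear_eisr(&d);
	printf("eisr after clear: %#x\n", d.eisr);
	return 0;
}
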
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a109370..a2214d9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2492,6 +2492,7 @@
 	case KVM_CAP_SIGNAL_MSI:
 #endif
 #ifdef CONFIG_HAVE_KVM_IRQFD
+	case KVM_CAP_IRQFD:
 	case KVM_CAP_IRQFD_RESAMPLE:
 #endif
 	case KVM_CAP_CHECK_EXTENSION_VM:
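
With KVM_CAP_IRQFD now listed in the generic switch, userspace can discover irqfd support through the usual KVM_CHECK_EXTENSION ioctl whenever CONFIG_HAVE_KVM_IRQFD is set. A minimal probe from userspace, assuming /dev/kvm is present and accessible (illustrative program, not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IRQFD) > 0)
		printf("KVM_CAP_IRQFD: supported\n");
	else
		printf("KVM_CAP_IRQFD: not supported\n");
	return 0;
}
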